Diffstat (limited to 'src/core')
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.cc  492
-rw-r--r--  src/core/ext/filters/client_channel/http_connect_handshaker.cc  2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.cc  4
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.h  4
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc  21
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc  10
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc  31
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc  22
-rw-r--r--  src/core/ext/filters/client_channel/method_params.h  4
-rw-r--r--  src/core/ext/filters/client_channel/resolver.h  4
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc  30
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc  5
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc  30
-rw-r--r--  src/core/ext/filters/client_channel/retry_throttle.h  4
-rw-r--r--  src/core/ext/filters/http/message_compress/message_compress_filter.cc  4
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.cc  25
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_settings.cc  6
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_encoder.cc  4
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_parser.cc  2
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_table.cc  4
-rw-r--r--  src/core/ext/transport/chttp2/transport/stream_lists.cc  6
-rw-r--r--  src/core/ext/transport/chttp2/transport/writing.cc  10
-rw-r--r--  src/core/ext/transport/inproc/inproc_transport.cc  84
-rw-r--r--  src/core/lib/channel/channel_args.cc  28
-rw-r--r--  src/core/lib/channel/channel_args.h  4
-rw-r--r--  src/core/lib/channel/handshaker.cc  47
-rw-r--r--  src/core/lib/channel/handshaker.h  4
-rw-r--r--  src/core/lib/debug/trace.h  11
-rw-r--r--  src/core/lib/gprpp/orphanable.h  8
-rw-r--r--  src/core/lib/gprpp/ref_counted.h  8
-rw-r--r--  src/core/lib/iomgr/call_combiner.cc  26
-rw-r--r--  src/core/lib/iomgr/closure.h  6
-rw-r--r--  src/core/lib/iomgr/combiner.cc  22
-rw-r--r--  src/core/lib/iomgr/ev_epoll1_linux.cc  48
-rw-r--r--  src/core/lib/iomgr/ev_epollex_linux.cc  54
-rw-r--r--  src/core/lib/iomgr/ev_epollsig_linux.cc  4
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.cc  6
-rw-r--r--  src/core/lib/iomgr/ev_posix.cc  6
-rw-r--r--  src/core/lib/iomgr/executor.cc  12
-rw-r--r--  src/core/lib/iomgr/resource_quota.cc  21
-rw-r--r--  src/core/lib/iomgr/tcp_client_custom.cc  4
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.cc  8
-rw-r--r--  src/core/lib/iomgr/tcp_custom.cc  20
-rw-r--r--  src/core/lib/iomgr/tcp_posix.cc  50
-rw-r--r--  src/core/lib/iomgr/tcp_server_custom.cc  10
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.cc  2
-rw-r--r--  src/core/lib/iomgr/tcp_uv.cc  3
-rw-r--r--  src/core/lib/iomgr/tcp_windows.cc  16
-rw-r--r--  src/core/lib/iomgr/timer_generic.cc  34
-rw-r--r--  src/core/lib/iomgr/timer_manager.cc  23
-rw-r--r--  src/core/lib/iomgr/timer_uv.cc  3
-rw-r--r--  src/core/lib/profiling/basic_timers.cc  1
-rw-r--r--  src/core/lib/security/security_connector/security_connector.cc  35
-rw-r--r--  src/core/lib/security/security_connector/security_connector.h  7
-rw-r--r--  src/core/lib/security/transport/secure_endpoint.cc  4
-rw-r--r--  src/core/lib/security/transport/security_handshaker.cc  8
-rw-r--r--  src/core/lib/slice/slice_hash_table.h  4
-rw-r--r--  src/core/lib/slice/slice_weak_hash_table.h  4
-rw-r--r--  src/core/lib/surface/call.cc  8
-rw-r--r--  src/core/lib/surface/server.cc  16
-rw-r--r--  src/core/lib/transport/bdp_estimator.cc  6
-rw-r--r--  src/core/lib/transport/bdp_estimator.h  4
-rw-r--r--  src/core/lib/transport/connectivity_state.cc  13
-rw-r--r--  src/core/tsi/ssl/session_cache/ssl_session_cache.h  4
64 files changed, 841 insertions, 569 deletions
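
The dominant change in this commit is mechanical: throughout the client channel, LB policies, resolvers, transports, and iomgr, trace messages guarded by a runtime trace flag move from GPR_DEBUG to GPR_INFO severity, presumably so that enabling a tracer surfaces its output even at default verbosity settings. A minimal standalone sketch of the guarded-logging pattern being touched (illustrative types only, not gRPC's actual headers):

#include <cstdarg>
#include <cstdio>

// Stand-in for grpc_core::TraceFlag: a runtime switch checked before logging.
class TraceFlag {
 public:
  explicit TraceFlag(bool enabled) : enabled_(enabled) {}
  bool enabled() const { return enabled_; }

 private:
  bool enabled_;
};

static TraceFlag client_channel_trace(true);

// Stand-in for gpr_log(GPR_INFO, ...): the severity is now INFO, not DEBUG.
static void log_info(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  std::vfprintf(stderr, fmt, args);
  std::fputc('\n', stderr);
  va_end(args);
}

int main() {
  void* chand = &client_channel_trace;  // stand-in for a channel_data*
  if (client_channel_trace.enabled()) {
    log_info("chand=%p: starting name resolution", chand);
  }
}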
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index a10bfea8b1..80a647fa94 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -174,7 +174,7 @@ static void set_channel_connectivity_state_locked(channel_data* chand,
}
}
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand,
+ gpr_log(GPR_INFO, "chand=%p: setting connectivity state to %s", chand,
grpc_connectivity_state_name(state));
}
grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);
@@ -186,7 +186,7 @@ static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy.get()) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
+ gpr_log(GPR_INFO, "chand=%p: lb_policy=%p state changed to %s", w->chand,
w->lb_policy, grpc_connectivity_state_name(w->state));
}
set_channel_connectivity_state_locked(w->chand, w->state,
@@ -215,7 +215,7 @@ static void watch_lb_policy_locked(channel_data* chand,
static void start_resolving_locked(channel_data* chand) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
+ gpr_log(GPR_INFO, "chand=%p: starting name resolution", chand);
}
GPR_ASSERT(!chand->started_resolving);
chand->started_resolving = true;
@@ -297,7 +297,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
return;
}
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: started name re-resolving", chand);
+ gpr_log(GPR_INFO, "chand=%p: started name re-resolving", chand);
}
chand->resolver->RequestReresolutionLocked();
// Give back the closure to the LB policy.
@@ -311,7 +311,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(arg);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p: got resolver result: resolver_result=%p error=%s", chand,
chand->resolver_result, grpc_error_string(error));
}
@@ -431,7 +431,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
}
}
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
"service_config=\"%s\"",
chand, lb_policy_name_dup,
@@ -466,7 +466,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
chand->resolver == nullptr) {
if (chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
+ gpr_log(GPR_INFO, "chand=%p: unreffing lb_policy=%p", chand,
chand->lb_policy.get());
}
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
@@ -480,11 +480,11 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
// error or shutdown.
if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: shutting down", chand);
+ gpr_log(GPR_INFO, "chand=%p: shutting down", chand);
}
if (chand->resolver != nullptr) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
+ gpr_log(GPR_INFO, "chand=%p: shutting down resolver", chand);
}
chand->resolver.reset();
}
@@ -506,7 +506,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
if (lb_policy_created) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
+ gpr_log(GPR_INFO, "chand=%p: initializing new LB policy", chand);
}
GRPC_ERROR_UNREF(state_error);
state = chand->lb_policy->CheckConnectivityLocked(&state_error);
@@ -842,10 +842,11 @@ typedef struct {
bool completed_recv_trailing_metadata : 1;
// State for callback processing.
bool retry_dispatched : 1;
- bool recv_initial_metadata_ready_deferred : 1;
- bool recv_message_ready_deferred : 1;
+ subchannel_batch_data* recv_initial_metadata_ready_deferred_batch;
grpc_error* recv_initial_metadata_error;
+ subchannel_batch_data* recv_message_ready_deferred_batch;
grpc_error* recv_message_error;
+ subchannel_batch_data* recv_trailing_metadata_internal_batch;
} subchannel_call_retry_state;
// Pending batches stored in call data.
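
The hunk above replaces the recv_initial_metadata_ready_deferred and recv_message_ready_deferred booleans with pointers to the deferred subchannel_batch_data. The pointer serves double duty: non-null still means "a callback was deferred", and it also records which batch to hand to the resumed callback (and to unref) later. A hedged sketch of that idea, with illustrative types in place of the real ones:

struct BatchData;  // opaque here

struct RetryState {
  // Before: bool recv_message_ready_deferred;
  // After: the deferral flag and the deferred batch are one field.
  BatchData* recv_message_ready_deferred_batch = nullptr;
};

// Deferring: remember the batch itself, not just the fact of deferral.
void defer_recv_message_ready(RetryState* rs, BatchData* batch) {
  rs->recv_message_ready_deferred_batch = batch;
}

// Resuming: the stored pointer tells us which batch to resume and release.
BatchData* take_deferred_batch(RetryState* rs) {
  BatchData* batch = rs->recv_message_ready_deferred_batch;
  rs->recv_message_ready_deferred_batch = nullptr;  // consume exactly once
  return batch;
}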
@@ -994,6 +995,39 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
}
}
+// Frees cached send_initial_metadata.
+static void free_cached_send_initial_metadata(channel_data* chand,
+ call_data* calld) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "chand=%p calld=%p: destroying calld->send_initial_metadata", chand,
+ calld);
+ }
+ grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+}
+
+// Frees cached send_message at index idx.
+static void free_cached_send_message(channel_data* chand, call_data* calld,
+ size_t idx) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]",
+ chand, calld, idx);
+ }
+ (*calld->send_messages)[idx]->Destroy();
+}
+
+// Frees cached send_trailing_metadata.
+static void free_cached_send_trailing_metadata(channel_data* chand,
+ call_data* calld) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "chand=%p calld=%p: destroying calld->send_trailing_metadata",
+ chand, calld);
+ }
+ grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+}
+
// Frees cached send ops that have already been completed after
// committing the call.
static void free_cached_send_op_data_after_commit(
@@ -1001,19 +1035,13 @@ static void free_cached_send_op_data_after_commit(
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (retry_state->completed_send_initial_metadata) {
- grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+ free_cached_send_initial_metadata(chand, calld);
}
for (size_t i = 0; i < retry_state->completed_send_message_count; ++i) {
- if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
- "]",
- chand, calld, i);
- }
- (*calld->send_messages)[i]->Destroy();
+ free_cached_send_message(chand, calld, i);
}
if (retry_state->completed_send_trailing_metadata) {
- grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+ free_cached_send_trailing_metadata(chand, calld);
}
}
@@ -1025,20 +1053,14 @@ static void free_cached_send_op_data_for_completed_batch(
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (batch_data->batch.send_initial_metadata) {
- grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+ free_cached_send_initial_metadata(chand, calld);
}
if (batch_data->batch.send_message) {
- if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
- "]",
- chand, calld, retry_state->completed_send_message_count - 1);
- }
- (*calld->send_messages)[retry_state->completed_send_message_count - 1]
- ->Destroy();
+ free_cached_send_message(chand, calld,
+ retry_state->completed_send_message_count - 1);
}
if (batch_data->batch.send_trailing_metadata) {
- grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+ free_cached_send_trailing_metadata(chand, calld);
}
}
@@ -1066,7 +1088,7 @@ static void pending_batches_add(grpc_call_element* elem,
call_data* calld = static_cast<call_data*>(elem->call_data);
const size_t idx = get_batch_index(batch);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
calld, idx);
}
@@ -1094,7 +1116,7 @@ static void pending_batches_add(grpc_call_element* elem,
}
if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: exceeded retry buffer size, committing",
chand, calld);
}
@@ -1109,7 +1131,7 @@ static void pending_batches_add(grpc_call_element* elem,
// retries are disabled so that we don't bother with retry overhead.
if (calld->num_attempts_completed == 0) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: disabling retries before first attempt",
chand, calld);
}
@@ -1156,7 +1178,7 @@ static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
if (calld->pending_batches[i].batch != nullptr) ++num_batches;
}
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
elem->channel_data, calld, num_batches, grpc_error_string(error));
}
@@ -1218,7 +1240,7 @@ static void pending_batches_resume(grpc_call_element* elem) {
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
if (calld->pending_batches[i].batch != nullptr) ++num_batches;
}
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: starting %" PRIuPTR
" pending batches on subchannel_call=%p",
chand, calld, num_batches, calld->subchannel_call);
@@ -1263,7 +1285,7 @@ static void maybe_clear_pending_batch(grpc_call_element* elem,
(!batch->recv_message ||
batch->payload->recv_message.recv_message_ready == nullptr)) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: clearing pending batch", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand,
calld);
}
pending_batch_clear(calld, pending);
@@ -1353,7 +1375,7 @@ static void retry_commit(grpc_call_element* elem,
if (calld->retry_committed) return;
calld->retry_committed = true;
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: committing retries", chand, calld);
+ gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand, calld);
}
if (retry_state != nullptr) {
free_cached_send_op_data_after_commit(elem, retry_state);
@@ -1398,7 +1420,7 @@ static void do_retry(grpc_call_element* elem,
next_attempt_time = calld->retry_backoff->NextAttemptTime();
}
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: retrying failed call in %" PRIuPTR " ms", chand,
calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
}
@@ -1432,7 +1454,7 @@ static bool maybe_retry(grpc_call_element* elem,
batch_data->subchannel_call));
if (retry_state->retry_dispatched) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: retry already dispatched", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: retry already dispatched", chand,
calld);
}
return true;
@@ -1444,14 +1466,14 @@ static bool maybe_retry(grpc_call_element* elem,
calld->retry_throttle_data->RecordSuccess();
}
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: call succeeded", chand, calld);
+ gpr_log(GPR_INFO, "chand=%p calld=%p: call succeeded", chand, calld);
}
return false;
}
// Status is not OK. Check whether the status is retryable.
if (!retry_policy->retryable_status_codes.Contains(status)) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: status %s not configured as retryable", chand,
calld, grpc_status_code_to_string(status));
}
@@ -1467,14 +1489,14 @@ static bool maybe_retry(grpc_call_element* elem,
if (calld->retry_throttle_data != nullptr &&
!calld->retry_throttle_data->RecordFailure()) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries throttled", chand, calld);
+ gpr_log(GPR_INFO, "chand=%p calld=%p: retries throttled", chand, calld);
}
return false;
}
// Check whether the call is committed.
if (calld->retry_committed) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries already committed", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: retries already committed", chand,
calld);
}
return false;
@@ -1483,7 +1505,7 @@ static bool maybe_retry(grpc_call_element* elem,
++calld->num_attempts_completed;
if (calld->num_attempts_completed >= retry_policy->max_attempts) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: exceeded %d retry attempts", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: exceeded %d retry attempts", chand,
calld, retry_policy->max_attempts);
}
return false;
@@ -1491,7 +1513,7 @@ static bool maybe_retry(grpc_call_element* elem,
// If the call was cancelled from the surface, don't retry.
if (calld->cancel_error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: call cancelled from surface, not retrying",
chand, calld);
}
@@ -1504,16 +1526,15 @@ static bool maybe_retry(grpc_call_element* elem,
uint32_t ms;
if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: not retrying due to server push-back",
chand, calld);
}
return false;
} else {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: server push-back: retry in %u ms", chand,
- calld, ms);
+ gpr_log(GPR_INFO, "chand=%p calld=%p: server push-back: retry in %u ms",
+ chand, calld, ms);
}
server_pushback_ms = (grpc_millis)ms;
}
@@ -1586,7 +1607,7 @@ static void invoke_recv_initial_metadata_callback(void* arg,
batch->payload->recv_initial_metadata.recv_initial_metadata_ready !=
nullptr) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: invoking recv_initial_metadata_ready for "
"pending batch at index %" PRIuPTR,
chand, calld, i);
@@ -1622,7 +1643,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: got recv_initial_metadata_ready, error=%s",
chand, calld, grpc_error_string(error));
}
@@ -1637,12 +1658,12 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) &&
!retry_state->completed_recv_trailing_metadata) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring recv_initial_metadata_ready "
"(Trailers-Only)",
chand, calld);
}
- retry_state->recv_initial_metadata_ready_deferred = true;
+ retry_state->recv_initial_metadata_ready_deferred_batch = batch_data;
retry_state->recv_initial_metadata_error = GRPC_ERROR_REF(error);
if (!retry_state->started_recv_trailing_metadata) {
// recv_trailing_metadata not yet started by application; start it
@@ -1679,7 +1700,7 @@ static void invoke_recv_message_callback(void* arg, grpc_error* error) {
if (batch != nullptr && batch->recv_message &&
batch->payload->recv_message.recv_message_ready != nullptr) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: invoking recv_message_ready for "
"pending batch at index %" PRIuPTR,
chand, calld, i);
@@ -1712,7 +1733,7 @@ static void recv_message_ready(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: got recv_message_ready, error=%s",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: got recv_message_ready, error=%s",
chand, calld, grpc_error_string(error));
}
subchannel_call_retry_state* retry_state =
@@ -1726,12 +1747,12 @@ static void recv_message_ready(void* arg, grpc_error* error) {
if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
!retry_state->completed_recv_trailing_metadata) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring recv_message_ready (nullptr "
"message and recv_trailing_metadata pending)",
chand, calld);
}
- retry_state->recv_message_ready_deferred = true;
+ retry_state->recv_message_ready_deferred_batch = batch_data;
retry_state->recv_message_error = GRPC_ERROR_REF(error);
if (!retry_state->started_recv_trailing_metadata) {
// recv_trailing_metadata not yet started by application; start it
@@ -1750,6 +1771,59 @@ static void recv_message_ready(void* arg, grpc_error* error) {
}
//
+// list of closures to execute in call combiner
+//
+
+// Represents a closure that needs to run in the call combiner as part of
+// starting or completing a batch.
+typedef struct {
+ grpc_closure* closure;
+ grpc_error* error;
+ const char* reason;
+ bool free_reason = false;
+} closure_to_execute;
+
+static void execute_closures_in_call_combiner(grpc_call_element* elem,
+ const char* caller,
+ closure_to_execute* closures,
+ size_t num_closures) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ // Note that the call combiner will be yielded for each closure that
+ // we schedule. We're already running in the call combiner, so one of
+ // the closures can be scheduled directly, but the others will
+ // have to re-enter the call combiner.
+ if (num_closures > 0) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO, "chand=%p calld=%p: %s starting closure: %s", chand,
+ calld, caller, closures[0].reason);
+ }
+ GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
+ if (closures[0].free_reason) {
+ gpr_free(const_cast<char*>(closures[0].reason));
+ }
+ for (size_t i = 1; i < num_closures; ++i) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "chand=%p calld=%p: %s starting closure in call combiner: %s",
+ chand, calld, caller, closures[i].reason);
+ }
+ GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
+ closures[i].error, closures[i].reason);
+ if (closures[i].free_reason) {
+ gpr_free(const_cast<char*>(closures[i].reason));
+ }
+ }
+ } else {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO, "chand=%p calld=%p: no closures to run for %s", chand,
+ calld, caller);
+ }
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner, "no closures to run");
+ }
+}
+
+//
// on_complete callback handling
//
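
The new execute_closures_in_call_combiner() helper centralizes the rule its comment spells out: the caller already holds the call combiner, so the first closure can be scheduled directly, while every additional closure must re-enter the combiner, and an empty list must yield it. A minimal sketch of that control flow, with illustrative types in place of grpc_closure and the real combiner API:

#include <cstdio>
#include <functional>
#include <queue>
#include <vector>

struct Closure {
  std::function<void()> fn;
  const char* reason;
};

struct CallCombiner {
  std::queue<Closure> pending;
  // Stand-in for GRPC_CALL_COMBINER_START: queue the closure to re-enter.
  void Start(Closure c) {
    std::printf("re-entering combiner: %s\n", c.reason);
    pending.push(std::move(c));
  }
  // Stand-in for GRPC_CALL_COMBINER_STOP: give the combiner back.
  void Stop(const char* why) { std::printf("yielding combiner: %s\n", why); }
};

void ExecuteClosures(CallCombiner* combiner, std::vector<Closure> closures) {
  if (closures.empty()) {
    combiner->Stop("no closures to run");
    return;
  }
  closures[0].fn();  // already in the combiner: run the first one directly
  for (size_t i = 1; i < closures.size(); ++i) {
    combiner->Start(std::move(closures[i]));  // the rest must re-enter
  }
}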
@@ -1777,36 +1851,35 @@ static void update_retry_state_for_completed_batch(
}
}
-// Represents a closure that needs to run as a result of a completed batch.
-typedef struct {
- grpc_closure* closure;
- grpc_error* error;
- const char* reason;
-} closure_to_execute;
-
// Adds any necessary closures for deferred recv_initial_metadata and
// recv_message callbacks to closures, updating *num_closures as needed.
static void add_closures_for_deferred_recv_callbacks(
subchannel_batch_data* batch_data, subchannel_call_retry_state* retry_state,
closure_to_execute* closures, size_t* num_closures) {
- if (batch_data->batch.recv_trailing_metadata &&
- retry_state->recv_initial_metadata_ready_deferred) {
- closure_to_execute* closure = &closures[(*num_closures)++];
- closure->closure =
- GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
- invoke_recv_initial_metadata_callback, batch_data,
- grpc_schedule_on_exec_ctx);
- closure->error = retry_state->recv_initial_metadata_error;
- closure->reason = "resuming recv_initial_metadata_ready";
- }
- if (batch_data->batch.recv_trailing_metadata &&
- retry_state->recv_message_ready_deferred) {
- closure_to_execute* closure = &closures[(*num_closures)++];
- closure->closure = GRPC_CLOSURE_INIT(&batch_data->recv_message_ready,
- invoke_recv_message_callback,
- batch_data, grpc_schedule_on_exec_ctx);
- closure->error = retry_state->recv_message_error;
- closure->reason = "resuming recv_message_ready";
+ if (batch_data->batch.recv_trailing_metadata) {
+ // Add closure for deferred recv_initial_metadata_ready.
+ if (retry_state->recv_initial_metadata_ready_deferred_batch != nullptr) {
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = GRPC_CLOSURE_INIT(
+ &batch_data->recv_initial_metadata_ready,
+ invoke_recv_initial_metadata_callback,
+ retry_state->recv_initial_metadata_ready_deferred_batch,
+ grpc_schedule_on_exec_ctx);
+ closure->error = retry_state->recv_initial_metadata_error;
+ closure->reason = "resuming recv_initial_metadata_ready";
+ retry_state->recv_initial_metadata_ready_deferred_batch = nullptr;
+ }
+ // Add closure for deferred recv_message_ready.
+ if (retry_state->recv_message_ready_deferred_batch != nullptr) {
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = GRPC_CLOSURE_INIT(
+ &batch_data->recv_message_ready, invoke_recv_message_callback,
+ retry_state->recv_message_ready_deferred_batch,
+ grpc_schedule_on_exec_ctx);
+ closure->error = retry_state->recv_message_error;
+ closure->reason = "resuming recv_message_ready";
+ retry_state->recv_message_ready_deferred_batch = nullptr;
+ }
}
}
@@ -1838,7 +1911,7 @@ static void add_closures_for_replay_or_pending_send_ops(
}
if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: starting next batch for pending send op(s)",
chand, calld);
}
@@ -1863,7 +1936,7 @@ static void add_closures_for_completed_pending_batches(
pending_batch* pending = &calld->pending_batches[i];
if (pending_batch_is_completed(pending, calld, retry_state)) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: pending batch completed at index %" PRIuPTR,
chand, calld, i);
}
@@ -1896,7 +1969,7 @@ static void add_closures_to_fail_unstarted_pending_batches(
pending_batch* pending = &calld->pending_batches[i];
if (pending_batch_is_unstarted(pending, calld, retry_state)) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: failing unstarted pending batch at index "
"%" PRIuPTR,
chand, calld, i);
@@ -1940,7 +2013,7 @@ static void on_complete(void* arg, grpc_error* error) {
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch);
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
chand, calld, grpc_error_string(error), batch_str);
gpr_free(batch_str);
}
@@ -1951,11 +2024,13 @@ static void on_complete(void* arg, grpc_error* error) {
// If we have previously completed recv_trailing_metadata, then the
// call is finished.
bool call_finished = retry_state->completed_recv_trailing_metadata;
+ // Record whether we were already committed before receiving this callback.
+ const bool previously_committed = calld->retry_committed;
// Update bookkeeping in retry_state.
update_retry_state_for_completed_batch(batch_data, retry_state);
if (call_finished) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: call already finished", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: call already finished", chand,
calld);
}
} else {
@@ -1979,35 +2054,39 @@ static void on_complete(void* arg, grpc_error* error) {
if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
}
- } else if (retry_state->completed_recv_trailing_metadata) {
- call_finished = true;
}
- if (call_finished && grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: call finished, status=%s", chand,
- calld, grpc_status_code_to_string(status));
- }
- // If the call is finished, check if we should retry.
- if (call_finished &&
- maybe_retry(elem, batch_data, status, server_pushback_md)) {
- // Unref batch_data for deferred recv_initial_metadata_ready or
- // recv_message_ready callbacks, if any.
- if (batch_data->batch.recv_trailing_metadata &&
- retry_state->recv_initial_metadata_ready_deferred) {
- batch_data_unref(batch_data);
- GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
+ // If the call just finished, check if we should retry.
+ if (call_finished) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO, "chand=%p calld=%p: call finished, status=%s", chand,
+ calld, grpc_status_code_to_string(status));
}
- if (batch_data->batch.recv_trailing_metadata &&
- retry_state->recv_message_ready_deferred) {
+ if (maybe_retry(elem, batch_data, status, server_pushback_md)) {
+ // Unref batch_data for deferred recv_initial_metadata_ready or
+ // recv_message_ready callbacks, if any.
+ if (batch_data->batch.recv_trailing_metadata &&
+ retry_state->recv_initial_metadata_ready_deferred_batch !=
+ nullptr) {
+ batch_data_unref(batch_data);
+ GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
+ }
+ if (batch_data->batch.recv_trailing_metadata &&
+ retry_state->recv_message_ready_deferred_batch != nullptr) {
+ batch_data_unref(batch_data);
+ GRPC_ERROR_UNREF(retry_state->recv_message_error);
+ }
batch_data_unref(batch_data);
- GRPC_ERROR_UNREF(retry_state->recv_message_error);
+ return;
}
- batch_data_unref(batch_data);
- return;
+ // Not retrying, so commit the call.
+ retry_commit(elem, retry_state);
}
}
- // If the call is finished or retries are committed, free cached data for
- // send ops that we've just completed.
- if (call_finished || calld->retry_committed) {
+ // If we were already committed before receiving this callback, free
+ // cached data for send ops that we've just completed. (If the call has
+ // just now finished, the call to retry_commit() above will have freed all
+ // cached send ops, so we don't need to do it here.)
+ if (previously_committed) {
free_cached_send_op_data_for_completed_batch(elem, batch_data, retry_state);
}
// Call not being retried.
@@ -2042,20 +2121,8 @@ static void on_complete(void* arg, grpc_error* error) {
// Don't need batch_data anymore.
batch_data_unref(batch_data);
// Schedule all of the closures identified above.
- // Note that the call combiner will be yielded for each closure that
- // we schedule. We're already running in the call combiner, so one of
- // the closures can be scheduled directly, but the others will
- // have to re-enter the call combiner.
- if (num_closures > 0) {
- GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
- for (size_t i = 1; i < num_closures; ++i) {
- GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
- closures[i].error, closures[i].reason);
- }
- } else {
- GRPC_CALL_COMBINER_STOP(calld->call_combiner,
- "no closures to run for on_complete");
- }
+ execute_closures_in_call_combiner(elem, "on_complete", closures,
+ num_closures);
}
//
@@ -2072,6 +2139,31 @@ static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) {
grpc_subchannel_call_process_op(subchannel_call, batch);
}
+// Adds a closure to closures that will execute batch in the call combiner.
+static void add_closure_for_subchannel_batch(
+ call_data* calld, grpc_transport_stream_op_batch* batch,
+ closure_to_execute* closures, size_t* num_closures) {
+ batch->handler_private.extra_arg = calld->subchannel_call;
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure,
+ start_batch_in_call_combiner, batch,
+ grpc_schedule_on_exec_ctx);
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = &batch->handler_private.closure;
+ closure->error = GRPC_ERROR_NONE;
+ // If the tracer is enabled, we log a more detailed message, which
+ // requires dynamic allocation. This will be freed in
+ // start_retriable_subchannel_batches().
+ if (grpc_client_channel_trace.enabled()) {
+ char* batch_str = grpc_transport_stream_op_batch_string(batch);
+ gpr_asprintf(const_cast<char**>(&closure->reason),
+ "starting batch in call combiner: %s", batch_str);
+ gpr_free(batch_str);
+ closure->free_reason = true;
+ } else {
+ closure->reason = "start_subchannel_batch";
+ }
+}
+
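
add_closure_for_subchannel_batch() exercises the free_reason flag from the closure_to_execute struct: the reason is normally a string literal, but when tracing is enabled it is built with gpr_asprintf() and must be freed after the closure is scheduled, which execute_closures_in_call_combiner() above does. A small sketch of that ownership rule, using standard allocation in place of gpr_asprintf/gpr_free:

#include <cstdio>
#include <cstdlib>

struct ClosureToExecute {
  const char* reason;
  bool free_reason = false;  // true iff reason was heap-allocated
};

int main() {
  const bool tracing = true;
  ClosureToExecute c;
  if (tracing) {
    // Dynamically built reason: mark it so the scheduler frees it.
    char* buf = static_cast<char*>(std::malloc(64));
    std::snprintf(buf, 64, "starting batch in call combiner: %s",
                  "send_message");
    c.reason = buf;
    c.free_reason = true;
  } else {
    c.reason = "start_subchannel_batch";  // string literal: never freed
  }
  std::puts(c.reason);  // "schedule" the closure
  if (c.free_reason) std::free(const_cast<char*>(c.reason));
}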
// Adds retriable send_initial_metadata op to batch_data.
static void add_retriable_send_initial_metadata_op(
call_data* calld, subchannel_call_retry_state* retry_state,
@@ -2131,7 +2223,7 @@ static void add_retriable_send_message_op(
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
chand, calld, retry_state->started_send_message_count);
}
@@ -2218,7 +2310,7 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: call failed but recv_trailing_metadata not "
"started; starting it internally",
chand, calld);
@@ -2227,8 +2319,12 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
static_cast<subchannel_call_retry_state*>(
grpc_connected_subchannel_call_get_parent_data(
calld->subchannel_call));
- subchannel_batch_data* batch_data = batch_data_create(elem, 1);
+ // Create batch_data with 2 refs, since this batch will be unreffed twice:
+ // once when the subchannel batch returns, and again when we actually get
+ // a recv_trailing_metadata op from the surface.
+ subchannel_batch_data* batch_data = batch_data_create(elem, 2);
add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
+ retry_state->recv_trailing_metadata_internal_batch = batch_data;
// Note: This will release the call combiner.
grpc_subchannel_call_process_op(calld->subchannel_call, &batch_data->batch);
}
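
The switch from one ref to batch_data_create(elem, 2) encodes two independent owners of the internally started recv_trailing_metadata batch, as the new comment explains: one unref when the subchannel batch returns, and one when the surface later asks for recv_trailing_metadata and consumes the stored result. A minimal refcounting sketch of that lifetime (illustrative, not gRPC's batch_data):

#include <cstdio>

struct BatchData {
  int refs;
};

BatchData* batch_data_create(int initial_refs) {
  return new BatchData{initial_refs};
}

void batch_data_unref(BatchData* b) {
  if (--b->refs == 0) {
    std::puts("destroying batch_data");
    delete b;
  }
}

int main() {
  BatchData* b = batch_data_create(2);
  batch_data_unref(b);  // owner 1: the subchannel batch completed
  batch_data_unref(b);  // owner 2: the surface consumed the stored result
}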
@@ -2246,7 +2342,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
!retry_state->started_send_initial_metadata &&
!calld->pending_send_initial_metadata) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: replaying previously completed "
"send_initial_metadata op",
chand, calld);
@@ -2262,7 +2358,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
retry_state->completed_send_message_count &&
!calld->pending_send_message) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: replaying previously completed "
"send_message op",
chand, calld);
@@ -2281,7 +2377,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
!retry_state->started_send_trailing_metadata &&
!calld->pending_send_trailing_metadata) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: replaying previously completed "
"send_trailing_metadata op",
chand, calld);
@@ -2299,7 +2395,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
// *num_batches as needed.
static void add_subchannel_batches_for_pending_batches(
grpc_call_element* elem, subchannel_call_retry_state* retry_state,
- grpc_transport_stream_op_batch** batches, size_t* num_batches) {
+ closure_to_execute* closures, size_t* num_closures) {
call_data* calld = static_cast<call_data*>(elem->call_data);
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
pending_batch* pending = &calld->pending_batches[i];
@@ -2342,13 +2438,37 @@ static void add_subchannel_batches_for_pending_batches(
}
if (batch->recv_trailing_metadata &&
retry_state->started_recv_trailing_metadata) {
+ // If we previously completed a recv_trailing_metadata op
+ // initiated by start_internal_recv_trailing_metadata(), use the
+ // result of that instead of trying to re-start this op.
+ if (retry_state->recv_trailing_metadata_internal_batch != nullptr) {
+ // If the batch completed, then trigger the completion callback
+ // directly, so that we return the previously returned results to
+ // the application. Otherwise, just unref the internally
+ // started subchannel batch, since we'll propagate the
+ // completion when it completes.
+ if (retry_state->completed_recv_trailing_metadata) {
+ subchannel_batch_data* batch_data =
+ retry_state->recv_trailing_metadata_internal_batch;
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = &batch_data->on_complete;
+ // Batches containing recv_trailing_metadata always succeed.
+ closure->error = GRPC_ERROR_NONE;
+ closure->reason =
+ "re-executing on_complete for recv_trailing_metadata "
+ "to propagate internally triggered result";
+ } else {
+ batch_data_unref(retry_state->recv_trailing_metadata_internal_batch);
+ }
+ retry_state->recv_trailing_metadata_internal_batch = nullptr;
+ }
continue;
}
// If we're not retrying, just send the batch as-is.
if (calld->method_params == nullptr ||
calld->method_params->retry_policy() == nullptr ||
calld->retry_committed) {
- batches[(*num_batches)++] = batch;
+ add_closure_for_subchannel_batch(calld, batch, closures, num_closures);
pending_batch_clear(calld, pending);
continue;
}
@@ -2385,7 +2505,8 @@ static void add_subchannel_batches_for_pending_batches(
GPR_ASSERT(batch->collect_stats);
add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
}
- batches[(*num_batches)++] = &batch_data->batch;
+ add_closure_for_subchannel_batch(calld, &batch_data->batch, closures,
+ num_closures);
}
}
@@ -2396,69 +2517,36 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: constructing retriable batches",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: constructing retriable batches",
chand, calld);
}
subchannel_call_retry_state* retry_state =
static_cast<subchannel_call_retry_state*>(
grpc_connected_subchannel_call_get_parent_data(
calld->subchannel_call));
+ // Construct list of closures to execute, one for each pending batch.
// We can start up to 6 batches.
- grpc_transport_stream_op_batch*
- batches[GPR_ARRAY_SIZE(calld->pending_batches)];
- size_t num_batches = 0;
+ closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches)];
+ size_t num_closures = 0;
// Replay previously-returned send_* ops if needed.
subchannel_batch_data* replay_batch_data =
maybe_create_subchannel_batch_for_replay(elem, retry_state);
if (replay_batch_data != nullptr) {
- batches[num_batches++] = &replay_batch_data->batch;
+ add_closure_for_subchannel_batch(calld, &replay_batch_data->batch, closures,
+ &num_closures);
}
// Now add pending batches.
- add_subchannel_batches_for_pending_batches(elem, retry_state, batches,
- &num_batches);
+ add_subchannel_batches_for_pending_batches(elem, retry_state, closures,
+ &num_closures);
// Start batches on subchannel call.
- // Note that the call combiner will be yielded for each batch that we
- // send down. We're already running in the call combiner, so one of
- // the batches can be started directly, but the others will have to
- // re-enter the call combiner.
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: starting %" PRIuPTR
" retriable batches on subchannel_call=%p",
- chand, calld, num_batches, calld->subchannel_call);
- }
- if (num_batches == 0) {
- // This should be fairly rare, but it can happen when (e.g.) an
- // attempt completes before it has finished replaying all
- // previously sent messages.
- GRPC_CALL_COMBINER_STOP(calld->call_combiner,
- "no retriable subchannel batches to start");
- } else {
- for (size_t i = 1; i < num_batches; ++i) {
- if (grpc_client_channel_trace.enabled()) {
- char* batch_str = grpc_transport_stream_op_batch_string(batches[i]);
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: starting batch in call combiner: %s", chand,
- calld, batch_str);
- gpr_free(batch_str);
- }
- batches[i]->handler_private.extra_arg = calld->subchannel_call;
- GRPC_CLOSURE_INIT(&batches[i]->handler_private.closure,
- start_batch_in_call_combiner, batches[i],
- grpc_schedule_on_exec_ctx);
- GRPC_CALL_COMBINER_START(calld->call_combiner,
- &batches[i]->handler_private.closure,
- GRPC_ERROR_NONE, "start_subchannel_batch");
- }
- if (grpc_client_channel_trace.enabled()) {
- char* batch_str = grpc_transport_stream_op_batch_string(batches[0]);
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting batch: %s", chand, calld,
- batch_str);
- gpr_free(batch_str);
- }
- // Note: This will release the call combiner.
- grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
+ chand, calld, num_closures, calld->subchannel_call);
}
+ execute_closures_in_call_combiner(elem, "start_retriable_subchannel_batches",
+ closures, num_closures);
}
//
@@ -2483,7 +2571,7 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
call_args, &calld->subchannel_call);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
chand, calld, calld->subchannel_call, grpc_error_string(new_error));
}
if (new_error != GRPC_ERROR_NONE) {
@@ -2524,7 +2612,7 @@ static void pick_done(void* arg, grpc_error* error) {
: GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to create subchannel", &error, 1);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: failed to create subchannel: error=%s",
chand, calld, grpc_error_string(new_error));
}
@@ -2568,7 +2656,7 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
// the one we started it on. However, this will just be a no-op.
if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: cancelling pick from LB policy %p",
chand, calld, chand->lb_policy.get());
}
chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
@@ -2583,8 +2671,8 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
- chand, calld);
+ gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed asynchronously", chand,
+ calld);
}
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
@@ -2596,7 +2684,7 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: applying service config to call",
chand, calld);
}
if (chand->retry_throttle_data != nullptr) {
@@ -2634,8 +2722,8 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
- chand, calld, chand->lb_policy.get());
+ gpr_log(GPR_INFO, "chand=%p calld=%p: starting pick on lb_policy=%p", chand,
+ calld, chand->lb_policy.get());
}
// Only get service config data on the first attempt.
if (calld->num_attempts_completed == 0) {
@@ -2682,7 +2770,7 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
if (pick_done) {
// Pick completed synchronously.
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
}
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
@@ -2726,7 +2814,7 @@ static void pick_after_resolver_result_cancel_locked(void* arg,
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: cancelling pick waiting for resolver result",
chand, calld);
}
@@ -2746,7 +2834,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
if (args->finished) {
/* cancelled, do nothing */
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "call cancelled before resolver result");
+ gpr_log(GPR_INFO, "call cancelled before resolver result");
}
gpr_free(args);
return;
@@ -2757,14 +2845,14 @@ static void pick_after_resolver_result_done_locked(void* arg,
call_data* calld = static_cast<call_data*>(elem->call_data);
if (error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: resolver failed to return data",
chand, calld);
}
async_pick_done_locked(elem, GRPC_ERROR_REF(error));
} else if (chand->resolver == nullptr) {
// Shutting down.
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: resolver disconnected", chand,
calld);
}
async_pick_done_locked(
@@ -2780,7 +2868,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
.send_initial_metadata_flags;
if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: resolver returned but no LB policy; "
"wait_for_ready=true; trying again",
chand, calld);
@@ -2788,7 +2876,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
pick_after_resolver_result_start_locked(elem);
} else {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: resolver returned but no LB policy; "
"wait_for_ready=false; failing",
chand, calld);
@@ -2801,7 +2889,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
}
} else {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: resolver returned, doing pick",
chand, calld);
}
if (pick_callback_start_locked(elem)) {
@@ -2819,7 +2907,7 @@ static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring pick pending resolver result", chand,
calld);
}
@@ -2886,7 +2974,7 @@ static void cc_start_transport_stream_op_batch(
// If we've previously been cancelled, immediately fail any new batches.
if (calld->cancel_error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: failing batch with error: %s",
chand, calld, grpc_error_string(calld->cancel_error));
}
// Note: This will release the call combiner.
@@ -2905,7 +2993,7 @@ static void cc_start_transport_stream_op_batch(
calld->cancel_error =
GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
calld, grpc_error_string(calld->cancel_error));
}
// If we do not have a subchannel call (i.e., a pick has not yet
@@ -2931,7 +3019,7 @@ static void cc_start_transport_stream_op_batch(
// streaming calls).
if (calld->subchannel_call != nullptr) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
calld, calld->subchannel_call);
}
@@ -2943,7 +3031,7 @@ static void cc_start_transport_stream_op_batch(
// combiner to start a pick.
if (batch->send_initial_metadata) {
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: entering client_channel combiner",
chand, calld);
}
GRPC_CLOSURE_SCHED(
@@ -2953,7 +3041,7 @@ static void cc_start_transport_stream_op_batch(
} else {
// For all other batches, release the call combiner.
if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"chand=%p calld=%p: saved batch, yeilding call combiner", chand,
calld);
}
@@ -3164,6 +3252,8 @@ static void watch_connectivity_state_locked(void* arg,
external_connectivity_watcher* found = nullptr;
if (w->state != nullptr) {
external_connectivity_watcher_list_append(w->chand, w);
+ // An assumption is being made that the closure is scheduled on the exec ctx
+ // scheduler and that GRPC_CLOSURE_RUN would run the closure immediately.
GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE);
GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
grpc_combiner_scheduler(w->chand->combiner));
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.cc b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
index fb29fa788d..4e8b8b71db 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.cc
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -326,7 +326,7 @@ static void http_connect_handshaker_do_handshake(
static const grpc_handshaker_vtable http_connect_handshaker_vtable = {
http_connect_handshaker_destroy, http_connect_handshaker_shutdown,
- http_connect_handshaker_do_handshake};
+ http_connect_handshaker_do_handshake, "http_connect"};
static grpc_handshaker* grpc_http_connect_handshaker_create() {
http_connect_handshaker* handshaker =
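
This hunk adds a fourth initializer, "http_connect", to grpc_handshaker_vtable, which suggests the vtable gained a name member; the handshaker.cc changes in this commit add trace logging, where such a name would be used. A hedged sketch of what the struct shape implies; the member names and pointer signatures here are guesses, not the real header:

// Sketch only: approximate shape of a handshaker vtable with a name field.
struct handshaker_vtable_sketch {
  void (*destroy)(void* handshaker);
  void (*shutdown)(void* handshaker, void* why);
  void (*do_handshake)(void* handshaker, void* acceptor, void* on_done,
                       void* args);
  const char* name;  // new: identifies the handshaker in trace output
};

static const handshaker_vtable_sketch http_connect_vtable = {
    nullptr, nullptr, nullptr, "http_connect"};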
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index fa63dd75b5..e065f45639 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -44,13 +44,13 @@ void LoadBalancingPolicy::TryReresolutionLocked(
GRPC_CLOSURE_SCHED(request_reresolution_, error);
request_reresolution_ = nullptr;
if (grpc_lb_trace->enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"%s %p: scheduling re-resolution closure with error=%s.",
grpc_lb_trace->name(), this, grpc_error_string(error));
}
} else {
if (grpc_lb_trace->enabled()) {
- gpr_log(GPR_DEBUG, "%s %p: no available re-resolution closure.",
+ gpr_log(GPR_INFO, "%s %p: no available re-resolution closure.",
grpc_lb_trace->name(), this);
}
}
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index c3e43e5ef6..454e00a690 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -162,6 +162,10 @@ class LoadBalancingPolicy
GRPC_ABSTRACT_BASE_CLASS
protected:
+ // So Delete() can access our protected dtor.
+ template <typename T>
+ friend void Delete(T*);
+
explicit LoadBalancingPolicy(const Args& args);
virtual ~LoadBalancingPolicy();
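
The friend declaration added here (and in grpclb.cc below) exists because grpc_core::Delete<T>() presumably performs a plain `delete`, which requires access to the now non-public destructor. A standalone illustration of the idiom:

#include <cstdio>

template <typename T>
void Delete(T* p) {
  delete p;  // needs access to T's destructor
}

class Policy {
 public:
  static Policy* Make() { return new Policy(); }

 protected:
  // So Delete() can access our protected dtor.
  template <typename T>
  friend void Delete(T*);

  ~Policy() { std::puts("~Policy"); }
};

int main() {
  Policy* p = Policy::Make();
  Delete(p);   // OK: Delete<Policy> is a friend
  // delete p; // would not compile: the destructor is protected
}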
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 0b2a30818e..1a675476f0 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -189,6 +189,10 @@ class GrpcLb : public LoadBalancingPolicy {
bool seen_initial_response() const { return seen_initial_response_; }
private:
+ // So Delete() can access our private dtor.
+ template <typename T>
+ friend void grpc_core::Delete(T*);
+
~BalancerCallState();
GrpcLb* grpclb_policy() const {
@@ -1243,7 +1247,7 @@ bool GrpcLb::PickLocked(PickState* pick) {
}
} else { // rr_policy_ == NULL
if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[grpclb %p] No RR policy. Adding to grpclb's pending picks",
this);
}
@@ -1409,14 +1413,13 @@ void GrpcLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
void GrpcLb::StartBalancerCallRetryTimerLocked() {
grpc_millis next_try = lb_call_backoff_.NextAttemptTime();
if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...", this);
+ gpr_log(GPR_INFO, "[grpclb %p] Connection to LB server lost...", this);
grpc_millis timeout = next_try - ExecCtx::Get()->Now();
if (timeout > 0) {
- gpr_log(GPR_DEBUG,
- "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.", this,
- timeout);
+ gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
+ this, timeout);
} else {
- gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
+ gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active immediately.",
this);
}
}
@@ -1724,7 +1727,7 @@ void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
GPR_ASSERT(args != nullptr);
if (rr_policy_ != nullptr) {
if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", this,
+ gpr_log(GPR_INFO, "[grpclb %p] Updating RR policy %p", this,
rr_policy_.get());
}
rr_policy_->UpdateLocked(*args);
@@ -1735,7 +1738,7 @@ void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
lb_policy_args.args = args;
CreateRoundRobinPolicyLocked(lb_policy_args);
if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", this,
+ gpr_log(GPR_INFO, "[grpclb %p] Created new RR policy %p", this,
rr_policy_.get());
}
}
@@ -1751,7 +1754,7 @@ void GrpcLb::OnRoundRobinRequestReresolutionLocked(void* arg,
}
if (grpc_lb_glb_trace.enabled()) {
gpr_log(
- GPR_DEBUG,
+ GPR_INFO,
"[grpclb %p] Re-resolution requested from the internal RR policy (%p).",
grpclb_policy, grpclb_policy->rr_policy_.get());
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index 9090c34412..a07e90322c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -95,7 +95,7 @@ PickFirst::PickFirst(const Args& args) : LoadBalancingPolicy(args) {
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
"pick_first");
if (grpc_lb_pick_first_trace.enabled()) {
- gpr_log(GPR_DEBUG, "Pick First %p created.", this);
+ gpr_log(GPR_INFO, "Pick First %p created.", this);
}
UpdateLocked(*args.args);
grpc_subchannel_index_ref();
@@ -103,7 +103,7 @@ PickFirst::PickFirst(const Args& args) : LoadBalancingPolicy(args) {
PickFirst::~PickFirst() {
if (grpc_lb_pick_first_trace.enabled()) {
- gpr_log(GPR_DEBUG, "Destroying Pick First %p", this);
+ gpr_log(GPR_INFO, "Destroying Pick First %p", this);
}
GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
@@ -126,7 +126,7 @@ void PickFirst::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
void PickFirst::ShutdownLocked() {
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) {
- gpr_log(GPR_DEBUG, "Pick First %p Shutting down", this);
+ gpr_log(GPR_INFO, "Pick First %p Shutting down", this);
}
shutdown_ = true;
PickState* pick;
@@ -372,7 +372,7 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args) {
// subchannel list.
if (latest_pending_subchannel_list_ != nullptr) {
if (grpc_lb_pick_first_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"Pick First %p Shutting down latest pending subchannel list "
"%p, about to be replaced by newer latest %p",
this, latest_pending_subchannel_list_, subchannel_list);
@@ -396,7 +396,7 @@ void PickFirst::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
grpc_lb_subchannel_data* sd = static_cast<grpc_lb_subchannel_data*>(arg);
PickFirst* p = static_cast<PickFirst*>(sd->subchannel_list->policy);
if (grpc_lb_pick_first_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
" of %" PRIuPTR
"), subchannel_list %p: state=%s p->shutdown_=%d "
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index e534131c02..b470016bd9 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -114,7 +114,7 @@ RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) {
"round_robin");
UpdateLocked(*args.args);
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[RR %p] Created with %" PRIuPTR " subchannels", this,
+ gpr_log(GPR_INFO, "[RR %p] Created with %" PRIuPTR " subchannels", this,
subchannel_list_->num_subchannels);
}
grpc_subchannel_index_ref();
@@ -122,7 +122,7 @@ RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) {
RoundRobin::~RoundRobin() {
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy", this);
+ gpr_log(GPR_INFO, "[RR %p] Destroying Round Robin policy", this);
}
GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
@@ -152,7 +152,7 @@ size_t RoundRobin::GetNextReadySubchannelIndexLocked() {
subchannel_list_->num_subchannels;
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
- GPR_DEBUG,
+ GPR_INFO,
"[RR %p] checking subchannel %p, subchannel_list %p, index %" PRIuPTR
": state=%s",
this, subchannel_list_->subchannels[index].subchannel,
@@ -163,7 +163,7 @@ size_t RoundRobin::GetNextReadySubchannelIndexLocked() {
if (subchannel_list_->subchannels[index].curr_connectivity_state ==
GRPC_CHANNEL_READY) {
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[RR %p] found next ready subchannel (%p) at index %" PRIuPTR
" of subchannel_list %p",
this, subchannel_list_->subchannels[index].subchannel, index,
@@ -173,7 +173,7 @@ size_t RoundRobin::GetNextReadySubchannelIndexLocked() {
}
}
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", this);
+ gpr_log(GPR_INFO, "[RR %p] no subchannels in ready state", this);
}
return subchannel_list_->num_subchannels;
}
@@ -183,7 +183,7 @@ void RoundRobin::UpdateLastReadySubchannelIndexLocked(size_t last_ready_index) {
GPR_ASSERT(last_ready_index < subchannel_list_->num_subchannels);
last_ready_subchannel_index_ = last_ready_index;
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[RR %p] setting last_ready_subchannel_index=%" PRIuPTR
" (SC %p, CSC %p)",
this, last_ready_index,
@@ -207,7 +207,7 @@ void RoundRobin::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
void RoundRobin::ShutdownLocked() {
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[RR %p] Shutting down", this);
+ gpr_log(GPR_INFO, "[RR %p] Shutting down", this);
}
shutdown_ = true;
PickState* pick;
@@ -311,8 +311,7 @@ void RoundRobin::ExitIdleLocked() {
bool RoundRobin::PickLocked(PickState* pick) {
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[RR %p] Trying to pick (shutdown: %d)", this,
- shutdown_);
+ gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", this, shutdown_);
}
GPR_ASSERT(!shutdown_);
if (subchannel_list_ != nullptr) {
@@ -327,7 +326,7 @@ bool RoundRobin::PickLocked(PickState* pick) {
}
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
- GPR_DEBUG,
+ GPR_INFO,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
"index %" PRIuPTR ")",
this, sd->subchannel, pick->connected_subchannel.get(),
@@ -416,7 +415,7 @@ void RoundRobin::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
RoundRobin* p = static_cast<RoundRobin*>(sd->subchannel_list->policy);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
- GPR_DEBUG,
+ GPR_INFO,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p: "
"prev_state=%s new_state=%s p->shutdown=%d "
"sd->subchannel_list->shutting_down=%d error=%s",
@@ -458,7 +457,7 @@ void RoundRobin::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
sd->connected_subchannel.reset();
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[RR %p] Subchannel %p has gone into TRANSIENT_FAILURE. "
"Requesting re-resolution",
p, sd->subchannel);
@@ -483,7 +482,7 @@ void RoundRobin::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
p->subchannel_list_ != nullptr
? p->subchannel_list_->num_subchannels
: 0;
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[RR %p] phasing out subchannel list %p (size %" PRIuPTR
") in favor of %p (size %" PRIuPTR ")",
p, p->subchannel_list_, num_subchannels, sd->subchannel_list,
@@ -517,7 +516,7 @@ void RoundRobin::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
*pick->user_data = selected->user_data;
}
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[RR %p] Fulfilling pending pick. Target <-- subchannel %p "
"(subchannel_list %p, index %" PRIuPTR ")",
p, selected->subchannel, p->subchannel_list_,
@@ -584,7 +583,7 @@ void RoundRobin::UpdateLocked(const grpc_channel_args& args) {
}
grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses",
+ gpr_log(GPR_INFO, "[RR %p] received update with %" PRIuPTR " addresses",
this, addresses->num_addresses);
}
grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
@@ -629,7 +628,7 @@ void RoundRobin::UpdateLocked(const grpc_channel_args& args) {
}
if (latest_pending_subchannel_list_ != nullptr) {
if (grpc_lb_round_robin_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[RR %p] Shutting down latest pending subchannel list %p, "
"about to be replaced by newer latest %p",
this, latest_pending_subchannel_list_, subchannel_list);
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
index 79cb64c6c6..257db57575 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -34,7 +34,7 @@ void grpc_lb_subchannel_data_unref_subchannel(grpc_lb_subchannel_data* sd,
const char* reason) {
if (sd->subchannel != nullptr) {
if (sd->subchannel_list->tracer->enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): unreffing subchannel",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
@@ -57,7 +57,7 @@ void grpc_lb_subchannel_data_start_connectivity_watch(
grpc_lb_subchannel_data* sd) {
if (sd->subchannel_list->tracer->enabled()) {
gpr_log(
- GPR_DEBUG,
+ GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): requesting connectivity change "
"notification (from %s)",
@@ -77,7 +77,7 @@ void grpc_lb_subchannel_data_start_connectivity_watch(
void grpc_lb_subchannel_data_stop_connectivity_watch(
grpc_lb_subchannel_data* sd) {
if (sd->subchannel_list->tracer->enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): stopping connectivity watch",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
@@ -98,7 +98,7 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
static_cast<grpc_lb_subchannel_list*>(
gpr_zalloc(sizeof(*subchannel_list)));
if (tracer->enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
tracer->name(), p, subchannel_list, addresses->num_addresses);
}
@@ -132,7 +132,7 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
if (tracer->enabled()) {
char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[%s %p] could not create subchannel for address uri %s, "
"ignoring",
tracer->name(), subchannel_list->policy, address_uri);
@@ -143,7 +143,7 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
if (tracer->enabled()) {
char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address uri %s",
tracer->name(), p, subchannel_list, subchannel_index, subchannel,
@@ -175,7 +175,7 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
static void subchannel_list_destroy(grpc_lb_subchannel_list* subchannel_list) {
if (subchannel_list->tracer->enabled()) {
- gpr_log(GPR_DEBUG, "[%s %p] Destroying subchannel_list %p",
+ gpr_log(GPR_INFO, "[%s %p] Destroying subchannel_list %p",
subchannel_list->tracer->name(), subchannel_list->policy,
subchannel_list);
}
@@ -192,7 +192,7 @@ void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
gpr_ref_non_zero(&subchannel_list->refcount);
if (subchannel_list->tracer->enabled()) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
- gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p REF %lu->%lu (%s)",
+ gpr_log(GPR_INFO, "[%s %p] subchannel_list %p REF %lu->%lu (%s)",
subchannel_list->tracer->name(), subchannel_list->policy,
subchannel_list, static_cast<unsigned long>(count - 1),
static_cast<unsigned long>(count), reason);
@@ -204,7 +204,7 @@ void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
const bool done = gpr_unref(&subchannel_list->refcount);
if (subchannel_list->tracer->enabled()) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
- gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p UNREF %lu->%lu (%s)",
+ gpr_log(GPR_INFO, "[%s %p] subchannel_list %p UNREF %lu->%lu (%s)",
subchannel_list->tracer->name(), subchannel_list->policy,
subchannel_list, static_cast<unsigned long>(count + 1),
static_cast<unsigned long>(count), reason);
@@ -217,7 +217,7 @@ void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
static void subchannel_data_cancel_connectivity_watch(
grpc_lb_subchannel_data* sd, const char* reason) {
if (sd->subchannel_list->tracer->enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): canceling connectivity watch (%s)",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
@@ -232,7 +232,7 @@ static void subchannel_data_cancel_connectivity_watch(
void grpc_lb_subchannel_list_shutdown_and_unref(
grpc_lb_subchannel_list* subchannel_list, const char* reason) {
if (subchannel_list->tracer->enabled()) {
- gpr_log(GPR_DEBUG, "[%s %p] Shutting down subchannel_list %p (%s)",
+ gpr_log(GPR_INFO, "[%s %p] Shutting down subchannel_list %p (%s)",
subchannel_list->tracer->name(), subchannel_list->policy,
subchannel_list, reason);
}
diff --git a/src/core/ext/filters/client_channel/method_params.h b/src/core/ext/filters/client_channel/method_params.h
index 099924edf3..a31d360f17 100644
--- a/src/core/ext/filters/client_channel/method_params.h
+++ b/src/core/ext/filters/client_channel/method_params.h
@@ -60,6 +60,10 @@ class ClientChannelMethodParams : public RefCounted<ClientChannelMethodParams> {
template <typename T, typename... Args>
friend T* grpc_core::New(Args&&... args);
+ // So Delete() can call our private dtor.
+ template <typename T>
+ friend void grpc_core::Delete(T*);
+
ClientChannelMethodParams() {}
virtual ~ClientChannelMethodParams() {}
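The same friend declaration recurs below in resolver.h (for a protected dtor) and retry_throttle.h. A minimal, self-contained sketch of the pattern — the class name and allocator are stand-ins; the real grpc_core::New/Delete placement-construct into gpr_malloc'd storage:

    #include <utility>

    // Hypothetical stand-ins for grpc_core::New/Delete.
    template <typename T, typename... Args>
    T* New(Args&&... args) { return new T(std::forward<Args>(args)...); }
    template <typename T>
    void Delete(T* p) { delete p; }

    class Params {
     public:
      void Unref() {
        if (--refs_ == 0) Delete(this);  // Delete() must reach the private dtor
      }
     private:
      template <typename T, typename... A>
      friend T* New(A&&... args);  // lets New() call the private ctor
      template <typename T>
      friend void Delete(T*);  // without this, Delete(this) fails to compile
      Params() = default;
      ~Params() = default;
      int refs_ = 1;
    };

    // Usage: Params* p = New<Params>(); ...; p->Unref();

Keeping both the ctor and dtor private forces every caller through New()/Unref(), so the refcount cannot be bypassed with a stack instance or a stray delete.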
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index cdb5a20ea3..02380314dd 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -105,6 +105,10 @@ class Resolver : public InternallyRefCountedWithTracing<Resolver> {
GRPC_ABSTRACT_BASE_CLASS
protected:
+ // So Delete() can access our protected dtor.
+ template <typename T>
+ friend void Delete(T*);
+
/// Does NOT take ownership of the reference to \a combiner.
// TODO(roth): Once we have a C++-like interface for combiners, this
// API should change to take a RefCountedPtr<>, so that we always take
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index 6ac9a77635..c3c62b60bf 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -363,6 +363,15 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
}
void AresDnsResolver::MaybeStartResolvingLocked() {
+ // If there is an existing timer, the time it fires is the earliest time we
+ // can start the next resolution.
+ if (have_next_resolution_timer_) {
+ // TODO(dgq): remove the following two lines once Pick First stops
+ // discarding subchannels after selecting.
+ ++resolved_version_;
+ MaybeFinishNextLocked();
+ return;
+ }
if (last_resolution_timestamp_ >= 0) {
const grpc_millis earliest_next_resolution =
last_resolution_timestamp_ + min_time_between_resolutions_;
@@ -375,17 +384,15 @@ void AresDnsResolver::MaybeStartResolvingLocked() {
"In cooldown from last resolution (from %" PRIdPTR
" ms ago). Will resolve again in %" PRIdPTR " ms",
last_resolution_ago, ms_until_next_resolution);
- if (!have_next_resolution_timer_) {
- have_next_resolution_timer_ = true;
- // TODO(roth): We currently deal with this ref manually. Once the
- // new closure API is done, find a way to track this ref with the timer
- // callback as part of the type system.
- RefCountedPtr<Resolver> self =
- Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown");
- self.release();
- grpc_timer_init(&next_resolution_timer_, ms_until_next_resolution,
- &on_next_resolution_);
- }
+ have_next_resolution_timer_ = true;
+ // TODO(roth): We currently deal with this ref manually. Once the
+ // new closure API is done, find a way to track this ref with the timer
+ // callback as part of the type system.
+ RefCountedPtr<Resolver> self =
+ Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown");
+ self.release();
+ grpc_timer_init(&next_resolution_timer_, ms_until_next_resolution,
+ &on_next_resolution_);
// TODO(dgq): remove the following two lines once Pick First stops
// discarding subchannels after selecting.
++resolved_version_;
@@ -397,6 +404,7 @@ void AresDnsResolver::MaybeStartResolvingLocked() {
}
void AresDnsResolver::StartResolvingLocked() {
+ gpr_log(GPR_DEBUG, "Start resolving.");
// TODO(roth): We currently deal with this ref manually. Once the
// new closure API is done, find a way to track this ref with the timer
// callback as part of the type system.
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index fb2435749d..e86ab5a37e 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
@@ -153,7 +153,10 @@ static void grpc_ares_request_unref(grpc_ares_request* r) {
/* If there are no pending queries, invoke on_done callback and destroy the
request */
if (gpr_unref(&r->pending_queries)) {
- grpc_cares_wrapper_address_sorting_sort(*(r->lb_addrs_out));
+ grpc_lb_addresses* lb_addrs = *(r->lb_addrs_out);
+ if (lb_addrs != nullptr) {
+ grpc_cares_wrapper_address_sorting_sort(lb_addrs);
+ }
GRPC_CLOSURE_SCHED(r->on_done, r->error);
gpr_mu_destroy(&r->mu);
grpc_ares_ev_driver_destroy(r->ev_driver);
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
index fbab136421..e7842a7951 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
@@ -236,6 +236,15 @@ void NativeDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
}
void NativeDnsResolver::MaybeStartResolvingLocked() {
+ // If there is an existing timer, the time it fires is the earliest time we
+ // can start the next resolution.
+ if (have_next_resolution_timer_) {
+ // TODO(dgq): remove the following two lines once Pick First stops
+ // discarding subchannels after selecting.
+ ++resolved_version_;
+ MaybeFinishNextLocked();
+ return;
+ }
if (last_resolution_timestamp_ >= 0) {
const grpc_millis earliest_next_resolution =
last_resolution_timestamp_ + min_time_between_resolutions_;
@@ -248,17 +257,15 @@ void NativeDnsResolver::MaybeStartResolvingLocked() {
"In cooldown from last resolution (from %" PRIdPTR
" ms ago). Will resolve again in %" PRIdPTR " ms",
last_resolution_ago, ms_until_next_resolution);
- if (!have_next_resolution_timer_) {
- have_next_resolution_timer_ = true;
- // TODO(roth): We currently deal with this ref manually. Once the
- // new closure API is done, find a way to track this ref with the timer
- // callback as part of the type system.
- RefCountedPtr<Resolver> self =
- Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown");
- self.release();
- grpc_timer_init(&next_resolution_timer_, ms_until_next_resolution,
- &on_next_resolution_);
- }
+ have_next_resolution_timer_ = true;
+ // TODO(roth): We currently deal with this ref manually. Once the
+ // new closure API is done, find a way to track this ref with the timer
+ // callback as part of the type system.
+ RefCountedPtr<Resolver> self =
+ Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown");
+ self.release();
+ grpc_timer_init(&next_resolution_timer_, ms_until_next_resolution,
+ &on_next_resolution_);
// TODO(dgq): remove the following two lines once Pick First stops
// discarding subchannels after selecting.
++resolved_version_;
@@ -270,6 +277,7 @@ void NativeDnsResolver::MaybeStartResolvingLocked() {
}
void NativeDnsResolver::StartResolvingLocked() {
+ gpr_log(GPR_DEBUG, "Start resolving.");
// TODO(roth): We currently deal with this ref manually. Once the
// new closure API is done, find a way to track this ref with the timer
// callback as part of the type system.
diff --git a/src/core/ext/filters/client_channel/retry_throttle.h b/src/core/ext/filters/client_channel/retry_throttle.h
index 2b6fa0a70b..fddafcd903 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.h
+++ b/src/core/ext/filters/client_channel/retry_throttle.h
@@ -42,6 +42,10 @@ class ServerRetryThrottleData : public RefCounted<ServerRetryThrottleData> {
intptr_t milli_token_ratio() const { return milli_token_ratio_; }
private:
+ // So Delete() can call our private dtor.
+ template <typename T>
+ friend void grpc_core::Delete(T*);
+
~ServerRetryThrottleData();
void GetReplacementThrottleDataIfNeeded(
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.cc b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
index e7d9949386..f8f478b6c0 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.cc
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -234,7 +234,7 @@ static void finish_send_message(grpc_call_element* elem) {
static_cast<float>(before_size);
GPR_ASSERT(grpc_message_compression_algorithm_name(
calld->message_compression_algorithm, &algo_name));
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
" bytes (%.2f%% savings)",
algo_name, before_size, after_size, 100 * savings_ratio);
@@ -246,7 +246,7 @@ static void finish_send_message(grpc_call_element* elem) {
const char* algo_name;
GPR_ASSERT(grpc_message_compression_algorithm_name(
calld->message_compression_algorithm, &algo_name));
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"Algorithm '%s' enabled but decided not to compress. Input size: "
"%" PRIuPTR,
algo_name, calld->slices.length);
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 6980b8671e..0ef73961a5 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -807,7 +807,7 @@ static const char* write_state_name(grpc_chttp2_write_state st) {
static void set_write_state(grpc_chttp2_transport* t,
grpc_chttp2_write_state st, const char* reason) {
- GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_DEBUG, "W:%p %s state %s -> %s [%s]", t,
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "W:%p %s state %s -> %s [%s]", t,
t->is_client ? "CLIENT" : "SERVER",
write_state_name(t->write_state),
write_state_name(st), reason));
@@ -1072,7 +1072,7 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
uint32_t goaway_error,
grpc_slice goaway_text) {
// GRPC_CHTTP2_IF_TRACING(
- // gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
+ // gpr_log(GPR_INFO, "got goaway [%d]: %s", goaway_error, msg));
// Discard the error from a previous goaway frame (if any)
if (t->goaway_error != GRPC_ERROR_NONE) {
@@ -1118,7 +1118,7 @@ static void maybe_start_some_streams(grpc_chttp2_transport* t) {
grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
/* safe since we can't (legally) be parsing this stream yet */
GRPC_CHTTP2_IF_TRACING(gpr_log(
- GPR_DEBUG, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d",
+ GPR_INFO, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d",
t->is_client ? "CLI" : "SVR", s, t->next_stream_id));
GPR_ASSERT(s->id == 0);
@@ -1183,7 +1183,7 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
if (grpc_http_trace.enabled()) {
const char* errstr = grpc_error_string(error);
gpr_log(
- GPR_DEBUG,
+ GPR_INFO,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
"write_state=%s",
t, closure,
@@ -1336,7 +1336,7 @@ static void perform_stream_op_locked(void* stream_op,
if (grpc_http_trace.enabled()) {
char* str = grpc_transport_stream_op_batch_string(op);
- gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str,
+ gpr_log(GPR_INFO, "perform_stream_op_locked: %s; on_complete = %p", str,
op->on_complete);
gpr_free(str);
if (op->send_initial_metadata) {
@@ -1638,7 +1638,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
if (grpc_http_trace.enabled()) {
char* str = grpc_transport_stream_op_batch_string(op);
- gpr_log(GPR_DEBUG, "perform_stream_op[s=%p]: %s", s, str);
+ gpr_log(GPR_INFO, "perform_stream_op[s=%p]: %s", s, str);
gpr_free(str);
}
@@ -2529,7 +2529,7 @@ static void schedule_bdp_ping_locked(grpc_chttp2_transport* t) {
static void start_bdp_ping_locked(void* tp, grpc_error* error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
if (grpc_http_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s: Start BDP ping err=%s", t->peer_string,
+ gpr_log(GPR_INFO, "%s: Start BDP ping err=%s", t->peer_string,
grpc_error_string(error));
}
/* Reset the keepalive ping timer */
@@ -2542,7 +2542,7 @@ static void start_bdp_ping_locked(void* tp, grpc_error* error) {
static void finish_bdp_ping_locked(void* tp, grpc_error* error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
if (grpc_http_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s: Complete BDP ping err=%s", t->peer_string,
+ gpr_log(GPR_INFO, "%s: Complete BDP ping err=%s", t->peer_string,
grpc_error_string(error));
}
if (error != GRPC_ERROR_NONE) {
@@ -2716,8 +2716,7 @@ static void keepalive_watchdog_fired_locked(void* arg, grpc_error* error) {
static void connectivity_state_set(grpc_chttp2_transport* t,
grpc_connectivity_state state,
grpc_error* error, const char* reason) {
- GRPC_CHTTP2_IF_TRACING(
- gpr_log(GPR_DEBUG, "set connectivity_state=%d", state));
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "set connectivity_state=%d", state));
grpc_connectivity_state_set(&t->channel_callback.state_tracker, state, error,
reason);
}
@@ -2984,7 +2983,7 @@ static void benign_reclaimer_locked(void* arg, grpc_error* error) {
/* Channel with no active streams: send a goaway to try and make it
* disconnect cleanly */
if (grpc_resource_quota_trace.enabled()) {
- gpr_log(GPR_DEBUG, "HTTP2: %s - send goaway to free memory",
+ gpr_log(GPR_INFO, "HTTP2: %s - send goaway to free memory",
t->peer_string);
}
send_goaway(t,
@@ -2992,7 +2991,7 @@ static void benign_reclaimer_locked(void* arg, grpc_error* error) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
} else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
" streams",
t->peer_string, grpc_chttp2_stream_map_size(&t->stream_map));
@@ -3013,7 +3012,7 @@ static void destructive_reclaimer_locked(void* arg, grpc_error* error) {
grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(
grpc_chttp2_stream_map_rand(&t->stream_map));
if (grpc_resource_quota_trace.enabled()) {
- gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
+ gpr_log(GPR_INFO, "HTTP2: %s - abandon stream id %d", t->peer_string,
s->id);
}
grpc_chttp2_cancel_stream(
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.cc b/src/core/ext/transport/chttp2/transport/frame_settings.cc
index 9ea27dcd47..987ac0e79d 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.cc
@@ -217,14 +217,14 @@ grpc_error* grpc_chttp2_settings_parser_parse(void* p, grpc_chttp2_transport* t,
t->initial_window_update += static_cast<int64_t>(parser->value) -
parser->incoming_settings[id];
if (grpc_http_trace.enabled() || grpc_flowctl_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%p[%s] adding %d for initial_window change",
- t, t->is_client ? "cli" : "svr",
+ gpr_log(GPR_INFO, "%p[%s] adding %d for initial_window change", t,
+ t->is_client ? "cli" : "svr",
static_cast<int>(t->initial_window_update));
}
}
parser->incoming_settings[id] = parser->value;
if (grpc_http_trace.enabled()) {
- gpr_log(GPR_DEBUG, "CHTTP2:%s:%s: got setting %s = %d",
+ gpr_log(GPR_INFO, "CHTTP2:%s:%s: got setting %s = %d",
t->is_client ? "CLI" : "SVR", t->peer_string, sp->name,
parser->value);
}
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.cc b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
index e4f3c1b81e..d5ef063883 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
@@ -470,7 +470,7 @@ static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
v = grpc_slice_to_c_string(GRPC_MDVALUE(elem));
}
gpr_log(
- GPR_DEBUG,
+ GPR_INFO,
"Encode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
k, v, GRPC_MDELEM_IS_INTERNED(elem), GRPC_MDELEM_STORAGE(elem),
grpc_slice_is_interned(GRPC_MDKEY(elem)),
@@ -654,7 +654,7 @@ void grpc_chttp2_hpack_compressor_set_max_table_size(
}
c->advertise_table_size_change = 1;
if (grpc_http_trace.enabled()) {
- gpr_log(GPR_DEBUG, "set max table size from encoder to %d", max_table_size);
+ gpr_log(GPR_INFO, "set max table size from encoder to %d", max_table_size);
}
}
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.cc b/src/core/ext/transport/chttp2/transport/hpack_parser.cc
index fc96a8b3e4..907ba71178 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.cc
@@ -633,7 +633,7 @@ static grpc_error* on_hdr(grpc_chttp2_hpack_parser* p, grpc_mdelem md,
v = grpc_slice_to_c_string(GRPC_MDVALUE(md));
}
gpr_log(
- GPR_DEBUG,
+ GPR_INFO,
"Decode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
k, v, GRPC_MDELEM_IS_INTERNED(md), GRPC_MDELEM_STORAGE(md),
grpc_slice_is_interned(GRPC_MDKEY(md)),
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.cc b/src/core/ext/transport/chttp2/transport/hpack_table.cc
index f050f502f5..7929258356 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.cc
@@ -247,7 +247,7 @@ void grpc_chttp2_hptbl_set_max_bytes(grpc_chttp2_hptbl* tbl,
return;
}
if (grpc_http_trace.enabled()) {
- gpr_log(GPR_DEBUG, "Update hpack parser max size to %d", max_bytes);
+ gpr_log(GPR_INFO, "Update hpack parser max size to %d", max_bytes);
}
while (tbl->mem_used > max_bytes) {
evict1(tbl);
@@ -270,7 +270,7 @@ grpc_error* grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl* tbl,
return err;
}
if (grpc_http_trace.enabled()) {
- gpr_log(GPR_DEBUG, "Update hpack parser table size to %d", bytes);
+ gpr_log(GPR_INFO, "Update hpack parser table size to %d", bytes);
}
while (tbl->mem_used > bytes) {
evict1(tbl);
diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.cc b/src/core/ext/transport/chttp2/transport/stream_lists.cc
index 5d3ec4b53b..6626170a7e 100644
--- a/src/core/ext/transport/chttp2/transport/stream_lists.cc
+++ b/src/core/ext/transport/chttp2/transport/stream_lists.cc
@@ -68,7 +68,7 @@ static bool stream_list_pop(grpc_chttp2_transport* t,
}
*stream = s;
if (s && grpc_trace_http2_stream_state.enabled()) {
- gpr_log(GPR_DEBUG, "%p[%d][%s]: pop from %s", t, s->id,
+ gpr_log(GPR_INFO, "%p[%d][%s]: pop from %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
return s != nullptr;
@@ -90,7 +90,7 @@ static void stream_list_remove(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
t->lists[id].tail = s->links[id].prev;
}
if (grpc_trace_http2_stream_state.enabled()) {
- gpr_log(GPR_DEBUG, "%p[%d][%s]: remove from %s", t, s->id,
+ gpr_log(GPR_INFO, "%p[%d][%s]: remove from %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
}
@@ -122,7 +122,7 @@ static void stream_list_add_tail(grpc_chttp2_transport* t,
t->lists[id].tail = s;
s->included[id] = 1;
if (grpc_trace_http2_stream_state.enabled()) {
- gpr_log(GPR_DEBUG, "%p[%d][%s]: add to %s", t, s->id,
+ gpr_log(GPR_INFO, "%p[%d][%s]: add to %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
}
diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc
index 6f32397a3a..85efe27080 100644
--- a/src/core/ext/transport/chttp2/transport/writing.cc
+++ b/src/core/ext/transport/chttp2/transport/writing.cc
@@ -52,7 +52,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) {
/* ping already in-flight: wait */
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: already pinging",
+ gpr_log(GPR_INFO, "%s: Ping delayed [%p]: already pinging",
t->is_client ? "CLIENT" : "SERVER", t->peer_string);
}
return;
@@ -61,7 +61,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
t->ping_policy.max_pings_without_data != 0) {
/* need to receive something of substance before sending a ping again */
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: too many recent pings: %d/%d",
+ gpr_log(GPR_INFO, "%s: Ping delayed [%p]: too many recent pings: %d/%d",
t->is_client ? "CLIENT" : "SERVER", t->peer_string,
t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data);
@@ -81,7 +81,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
if (next_allowed_ping > now) {
/* not enough elapsed time between successive pings */
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"%s: Ping delayed [%p]: not enough time elapsed since last ping. "
" Last ping %f: Next ping %f: Now %f",
t->is_client ? "CLIENT" : "SERVER", t->peer_string,
@@ -107,7 +107,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
GRPC_STATS_INC_HTTP2_PINGS_SENT();
t->ping_state.last_ping_sent_time = now;
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%s: Ping sent [%p]: %d/%d",
+ gpr_log(GPR_INFO, "%s: Ping sent [%p]: %d/%d",
t->is_client ? "CLIENT" : "SERVER", t->peer_string,
t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data);
@@ -401,7 +401,7 @@ class StreamWriteContext {
StreamWriteContext(WriteContext* write_context, grpc_chttp2_stream* s)
: write_context_(write_context), t_(write_context->transport()), s_(s) {
GRPC_CHTTP2_IF_TRACING(
- gpr_log(GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_,
+ gpr_log(GPR_INFO, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_,
t_->is_client ? "CLIENT" : "SERVER", s->id,
s->sent_initial_metadata, s->send_initial_metadata != nullptr,
(int)(s->flow_control->local_window_delta() -
diff --git a/src/core/ext/transport/inproc/inproc_transport.cc b/src/core/ext/transport/inproc/inproc_transport.cc
index 67a380077b..2c3bff5c1e 100644
--- a/src/core/ext/transport/inproc/inproc_transport.cc
+++ b/src/core/ext/transport/inproc/inproc_transport.cc
@@ -125,12 +125,12 @@ static bool cancel_stream_locked(inproc_stream* s, grpc_error* error);
static void op_state_machine(void* arg, grpc_error* error);
static void ref_transport(inproc_transport* t) {
- INPROC_LOG(GPR_DEBUG, "ref_transport %p", t);
+ INPROC_LOG(GPR_INFO, "ref_transport %p", t);
gpr_ref(&t->refs);
}
static void really_destroy_transport(inproc_transport* t) {
- INPROC_LOG(GPR_DEBUG, "really_destroy_transport %p", t);
+ INPROC_LOG(GPR_INFO, "really_destroy_transport %p", t);
grpc_connectivity_state_destroy(&t->connectivity);
if (gpr_unref(&t->mu->refs)) {
gpr_free(t->mu);
@@ -139,7 +139,7 @@ static void really_destroy_transport(inproc_transport* t) {
}
static void unref_transport(inproc_transport* t) {
- INPROC_LOG(GPR_DEBUG, "unref_transport %p", t);
+ INPROC_LOG(GPR_INFO, "unref_transport %p", t);
if (gpr_unref(&t->refs)) {
really_destroy_transport(t);
}
@@ -154,17 +154,17 @@ static void unref_transport(inproc_transport* t) {
#endif
static void ref_stream(inproc_stream* s, const char* reason) {
- INPROC_LOG(GPR_DEBUG, "ref_stream %p %s", s, reason);
+ INPROC_LOG(GPR_INFO, "ref_stream %p %s", s, reason);
STREAM_REF(s->refs, reason);
}
static void unref_stream(inproc_stream* s, const char* reason) {
- INPROC_LOG(GPR_DEBUG, "unref_stream %p %s", s, reason);
+ INPROC_LOG(GPR_INFO, "unref_stream %p %s", s, reason);
STREAM_UNREF(s->refs, reason);
}
static void really_destroy_stream(inproc_stream* s) {
- INPROC_LOG(GPR_DEBUG, "really_destroy_stream %p", s);
+ INPROC_LOG(GPR_INFO, "really_destroy_stream %p", s);
GRPC_ERROR_UNREF(s->write_buffer_cancel_error);
GRPC_ERROR_UNREF(s->cancel_self_error);
@@ -225,7 +225,7 @@ static grpc_error* fill_in_metadata(inproc_stream* s,
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena) {
- INPROC_LOG(GPR_DEBUG, "init_stream %p %p %p", gt, gs, server_data);
+ INPROC_LOG(GPR_INFO, "init_stream %p %p %p", gt, gs, server_data);
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
s->arena = arena;
@@ -282,8 +282,8 @@ static int init_stream(grpc_transport* gt, grpc_stream* gs,
// Pass the client-side stream address to the server-side for a ref
ref_stream(s, "inproc_init_stream:clt"); // ref it now on behalf of server
// side to avoid destruction
- INPROC_LOG(GPR_DEBUG, "calling accept stream cb %p %p",
- st->accept_stream_cb, st->accept_stream_data);
+ INPROC_LOG(GPR_INFO, "calling accept stream cb %p %p", st->accept_stream_cb,
+ st->accept_stream_data);
(*st->accept_stream_cb)(st->accept_stream_data, &st->base, (void*)s);
} else {
// This is the server-side and is being called through accept_stream_cb
@@ -378,7 +378,7 @@ static void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error,
int is_rtm = static_cast<int>(op == s->recv_trailing_md_op);
if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) {
- INPROC_LOG(GPR_DEBUG, "%s %p %p %p", msg, s, op, error);
+ INPROC_LOG(GPR_INFO, "%s %p %p %p", msg, s, op, error);
GRPC_CLOSURE_SCHED(op->on_complete, GRPC_ERROR_REF(error));
}
}
@@ -393,7 +393,7 @@ static void maybe_schedule_op_closure_locked(inproc_stream* s,
}
static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
- INPROC_LOG(GPR_DEBUG, "op_state_machine %p fail_helper", s);
+ INPROC_LOG(GPR_INFO, "op_state_machine %p fail_helper", s);
// If we're failing this side, we need to make sure that
// we also send or have already sent trailing metadata
if (!s->trailing_md_sent) {
@@ -458,7 +458,7 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
*s->recv_initial_md_op->payload->recv_initial_metadata
.trailing_metadata_available = true;
}
- INPROC_LOG(GPR_DEBUG,
+ INPROC_LOG(GPR_INFO,
"fail_helper %p scheduling initial-metadata-ready %p %p", s,
error, err);
GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata
@@ -472,7 +472,7 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
s->recv_initial_md_op = nullptr;
}
if (s->recv_message_op) {
- INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-ready %p", s,
+ INPROC_LOG(GPR_INFO, "fail_helper %p scheduling message-ready %p", s,
error);
GRPC_CLOSURE_SCHED(
s->recv_message_op->payload->recv_message.recv_message_ready,
@@ -496,9 +496,8 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
s->send_trailing_md_op = nullptr;
}
if (s->recv_trailing_md_op) {
- INPROC_LOG(GPR_DEBUG,
- "fail_helper %p scheduling trailing-md-on-complete %p", s,
- error);
+ INPROC_LOG(GPR_INFO, "fail_helper %p scheduling trailing-md-on-complete %p",
+ s, error);
complete_if_batch_end_locked(
s, error, s->recv_trailing_md_op,
"fail_helper scheduling recv-trailing-metadata-on-complete");
@@ -549,7 +548,7 @@ static void message_transfer_locked(inproc_stream* sender,
receiver->recv_stream.Init(&receiver->recv_message, 0);
receiver->recv_message_op->payload->recv_message.recv_message->reset(
receiver->recv_stream.get());
- INPROC_LOG(GPR_DEBUG, "message_transfer_locked %p scheduling message-ready",
+ INPROC_LOG(GPR_INFO, "message_transfer_locked %p scheduling message-ready",
receiver);
GRPC_CLOSURE_SCHED(
receiver->recv_message_op->payload->recv_message.recv_message_ready,
@@ -577,7 +576,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
bool needs_close = false;
- INPROC_LOG(GPR_DEBUG, "op_state_machine %p", arg);
+ INPROC_LOG(GPR_INFO, "op_state_machine %p", arg);
inproc_stream* s = static_cast<inproc_stream*>(arg);
gpr_mu* mu = &s->t->mu->mu; // keep aside in case s gets closed
gpr_mu_lock(mu);
@@ -626,7 +625,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
: &other->to_read_trailing_md_filled;
if (*destfilled || s->trailing_md_sent) {
// The buffer is already in use; that's an error!
- INPROC_LOG(GPR_DEBUG, "Extra trailing metadata %p", s);
+ INPROC_LOG(GPR_INFO, "Extra trailing metadata %p", s);
new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata");
fail_helper_locked(s, GRPC_ERROR_REF(new_err));
goto done;
@@ -639,7 +638,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
}
s->trailing_md_sent = true;
if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
- INPROC_LOG(GPR_DEBUG,
+ INPROC_LOG(GPR_INFO,
"op_state_machine %p scheduling trailing-md-on-complete", s);
GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete,
GRPC_ERROR_NONE);
@@ -658,7 +657,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
new_err =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd initial md");
INPROC_LOG(
- GPR_DEBUG,
+ GPR_INFO,
"op_state_machine %p scheduling on_complete errors for already "
"recvd initial md %p",
s, new_err);
@@ -684,7 +683,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
}
grpc_metadata_batch_clear(&s->to_read_initial_md);
s->to_read_initial_md_filled = false;
- INPROC_LOG(GPR_DEBUG,
+ INPROC_LOG(GPR_INFO,
"op_state_machine %p scheduling initial-metadata-ready %p", s,
new_err);
GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata
@@ -696,7 +695,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
s->recv_initial_md_op = nullptr;
if (new_err != GRPC_ERROR_NONE) {
- INPROC_LOG(GPR_DEBUG,
+ INPROC_LOG(GPR_INFO,
"op_state_machine %p scheduling on_complete errors2 %p", s,
new_err);
fail_helper_locked(s, GRPC_ERROR_REF(new_err));
@@ -719,7 +718,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
new_err =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd trailing md");
INPROC_LOG(
- GPR_DEBUG,
+ GPR_INFO,
"op_state_machine %p scheduling on_complete errors for already "
"recvd trailing md %p",
s, new_err);
@@ -729,7 +728,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
if (s->recv_message_op != nullptr) {
// This message needs to be wrapped up because it will never be
// satisfied
- INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s);
+ INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s);
GRPC_CLOSURE_SCHED(
s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
@@ -764,7 +763,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
// (If the server hasn't already sent its trailing md, it doesn't have
// a final status, so don't mark this op complete)
if (s->t->is_client || s->trailing_md_sent) {
- INPROC_LOG(GPR_DEBUG,
+ INPROC_LOG(GPR_INFO,
"op_state_machine %p scheduling trailing-md-on-complete %p",
s, new_err);
GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete,
@@ -772,21 +771,21 @@ static void op_state_machine(void* arg, grpc_error* error) {
s->recv_trailing_md_op = nullptr;
needs_close = true;
} else {
- INPROC_LOG(GPR_DEBUG,
+ INPROC_LOG(GPR_INFO,
"op_state_machine %p server needs to delay handling "
"trailing-md-on-complete %p",
s, new_err);
}
} else {
INPROC_LOG(
- GPR_DEBUG,
+ GPR_INFO,
"op_state_machine %p has trailing md but not yet waiting for it", s);
}
}
if (s->trailing_md_recvd && s->recv_message_op) {
// No further message will come on this stream, so finish off the
// recv_message_op
- INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s);
+ INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s);
GRPC_CLOSURE_SCHED(
s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
@@ -810,7 +809,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
// Didn't get the item we wanted so we still need to get
// rescheduled
INPROC_LOG(
- GPR_DEBUG, "op_state_machine %p still needs closure %p %p %p %p %p", s,
+ GPR_INFO, "op_state_machine %p still needs closure %p %p %p %p %p", s,
s->send_message_op, s->send_trailing_md_op, s->recv_initial_md_op,
s->recv_message_op, s->recv_trailing_md_op);
s->ops_needed = true;
@@ -826,8 +825,7 @@ done:
static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
bool ret = false; // was the cancel accepted
- INPROC_LOG(GPR_DEBUG, "cancel_stream %p with %s", s,
- grpc_error_string(error));
+ INPROC_LOG(GPR_INFO, "cancel_stream %p with %s", s, grpc_error_string(error));
if (s->cancel_self_error == GRPC_ERROR_NONE) {
ret = true;
s->cancel_self_error = GRPC_ERROR_REF(error);
@@ -877,7 +875,7 @@ static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
grpc_transport_stream_op_batch* op) {
- INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %p %p", gt, gs, op);
+ INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", gt, gs, op);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
gpr_mu* mu = &s->t->mu->mu; // save aside in case s gets closed
gpr_mu_lock(mu);
@@ -907,7 +905,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
// already self-canceled so still give it an error
error = GRPC_ERROR_REF(s->cancel_self_error);
} else {
- INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %s%s%s%s%s%s%s", s,
+ INPROC_LOG(GPR_INFO, "perform_stream_op %p %s%s%s%s%s%s%s", s,
s->t->is_client ? "client" : "server",
op->send_initial_metadata ? " send_initial_metadata" : "",
op->send_message ? " send_message" : "",
@@ -936,7 +934,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
: &other->to_read_initial_md_filled;
if (*destfilled || s->initial_md_sent) {
// The buffer is already in use; that's an error!
- INPROC_LOG(GPR_DEBUG, "Extra initial metadata %p", s);
+ INPROC_LOG(GPR_INFO, "Extra initial metadata %p", s);
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra initial metadata");
} else {
if (!other || !other->closed) {
@@ -1013,7 +1011,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
true;
}
INPROC_LOG(
- GPR_DEBUG,
+ GPR_INFO,
"perform_stream_op error %p scheduling initial-metadata-ready %p",
s, error);
GRPC_CLOSURE_SCHED(
@@ -1022,14 +1020,14 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
}
if (op->recv_message) {
INPROC_LOG(
- GPR_DEBUG,
+ GPR_INFO,
"perform_stream_op error %p scheduling recv message-ready %p", s,
error);
GRPC_CLOSURE_SCHED(op->payload->recv_message.recv_message_ready,
GRPC_ERROR_REF(error));
}
}
- INPROC_LOG(GPR_DEBUG, "perform_stream_op %p scheduling on_complete %p", s,
+ INPROC_LOG(GPR_INFO, "perform_stream_op %p scheduling on_complete %p", s,
error);
GRPC_CLOSURE_SCHED(on_complete, GRPC_ERROR_REF(error));
}
@@ -1042,7 +1040,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
}
static void close_transport_locked(inproc_transport* t) {
- INPROC_LOG(GPR_DEBUG, "close_transport %p %d", t, t->is_closed);
+ INPROC_LOG(GPR_INFO, "close_transport %p %d", t, t->is_closed);
grpc_connectivity_state_set(
&t->connectivity, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Closing transport."),
@@ -1063,7 +1061,7 @@ static void close_transport_locked(inproc_transport* t) {
static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
- INPROC_LOG(GPR_DEBUG, "perform_transport_op %p %p", t, op);
+ INPROC_LOG(GPR_INFO, "perform_transport_op %p %p", t, op);
gpr_mu_lock(&t->mu->mu);
if (op->on_connectivity_state_change) {
grpc_connectivity_state_notify_on_state_change(
@@ -1096,7 +1094,7 @@ static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
grpc_closure* then_schedule_closure) {
- INPROC_LOG(GPR_DEBUG, "destroy_stream %p %p", gs, then_schedule_closure);
+ INPROC_LOG(GPR_INFO, "destroy_stream %p %p", gs, then_schedule_closure);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
s->closure_at_destroy = then_schedule_closure;
really_destroy_stream(s);
@@ -1104,7 +1102,7 @@ static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
static void destroy_transport(grpc_transport* gt) {
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
- INPROC_LOG(GPR_DEBUG, "destroy_transport %p", t);
+ INPROC_LOG(GPR_INFO, "destroy_transport %p", t);
gpr_mu_lock(&t->mu->mu);
close_transport_locked(t);
gpr_mu_unlock(&t->mu->mu);
@@ -1165,7 +1163,7 @@ static void inproc_transports_create(grpc_transport** server_transport,
const grpc_channel_args* server_args,
grpc_transport** client_transport,
const grpc_channel_args* client_args) {
- INPROC_LOG(GPR_DEBUG, "inproc_transports_create");
+ INPROC_LOG(GPR_INFO, "inproc_transports_create");
inproc_transport* st =
static_cast<inproc_transport*>(gpr_zalloc(sizeof(*st)));
inproc_transport* ct =
diff --git a/src/core/lib/channel/channel_args.cc b/src/core/lib/channel/channel_args.cc
index 66a86c2286..e49d532e11 100644
--- a/src/core/lib/channel/channel_args.cc
+++ b/src/core/lib/channel/channel_args.cc
@@ -411,3 +411,31 @@ grpc_arg grpc_channel_arg_pointer_create(
arg.value.pointer.vtable = vtable;
return arg;
}
+
+char* grpc_channel_args_string(const grpc_channel_args* args) {
+ if (args == nullptr) return nullptr;
+ gpr_strvec v;
+ gpr_strvec_init(&v);
+ for (size_t i = 0; i < args->num_args; ++i) {
+ const grpc_arg& arg = args->args[i];
+ char* s;
+ switch (arg.type) {
+ case GRPC_ARG_INTEGER:
+ gpr_asprintf(&s, "%s=%d", arg.key, arg.value.integer);
+ break;
+ case GRPC_ARG_STRING:
+ gpr_asprintf(&s, "%s=%s", arg.key, arg.value.string);
+ break;
+ case GRPC_ARG_POINTER:
+ gpr_asprintf(&s, "%s=%p", arg.key, arg.value.pointer.p);
+ break;
+ default:
+ gpr_asprintf(&s, "arg with unknown type");
+ }
+ gpr_strvec_add(&v, s);
+ }
+ char* result =
+ gpr_strjoin_sep(const_cast<const char**>(v.strs), v.count, ", ", nullptr);
+ gpr_strvec_destroy(&v);
+ return result;
+}
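A usage sketch for the new helper (the channel-creation context is hypothetical; per the header comment below, the caller owns the returned string):

    // Log a channel's args when tracing (illustrative only).
    char* args_str = grpc_channel_args_string(args);
    gpr_log(GPR_INFO, "channel created with args: %s",
            args_str != nullptr ? args_str : "(none)");
    gpr_free(args_str);  // safe even when args_str is nullptr

Note the nullptr guard: grpc_channel_args_string() returns nullptr when args is nullptr, and passing a null pointer to %s is undefined behavior on most libcs.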
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index c0d6a17356..5ff303a9dc 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -124,4 +124,8 @@ grpc_arg grpc_channel_arg_integer_create(char* name, int value);
grpc_arg grpc_channel_arg_pointer_create(char* name, void* value,
const grpc_arg_pointer_vtable* vtable);
+// Returns a string representing channel args in human-readable form.
+// Caller takes ownership of the result.
+char* grpc_channel_args_string(const grpc_channel_args* args);
+
#endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_ARGS_H */
diff --git a/src/core/lib/channel/handshaker.cc b/src/core/lib/channel/handshaker.cc
index 9b1af8d6cb..2faeb64cb6 100644
--- a/src/core/lib/channel/handshaker.cc
+++ b/src/core/lib/channel/handshaker.cc
@@ -22,11 +22,15 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker.h"
+#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/timer.h"
+grpc_core::TraceFlag grpc_handshaker_trace(false, "handshaker");
+
//
// grpc_handshaker
//
@@ -52,6 +56,10 @@ void grpc_handshaker_do_handshake(grpc_handshaker* handshaker,
args);
}
+const char* grpc_handshaker_name(grpc_handshaker* handshaker) {
+ return handshaker->vtable->name;
+}
+
//
// grpc_handshake_manager
//
@@ -127,6 +135,12 @@ static bool is_power_of_2(size_t n) { return (n & (n - 1)) == 0; }
void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
grpc_handshaker* handshaker) {
+ if (grpc_handshaker_trace.enabled()) {
+ gpr_log(
+ GPR_INFO,
+ "handshake_manager %p: adding handshaker %s [%p] at index %" PRIuPTR,
+ mgr, grpc_handshaker_name(handshaker), handshaker, mgr->count);
+ }
gpr_mu_lock(&mgr->mu);
// To avoid allocating memory for each handshaker we add, we double
// the number of elements every time we need more.
@@ -172,23 +186,56 @@ void grpc_handshake_manager_shutdown(grpc_handshake_manager* mgr,
GRPC_ERROR_UNREF(why);
}
+static char* handshaker_args_string(grpc_handshaker_args* args) {
+ char* args_str = grpc_channel_args_string(args->args);
+ size_t num_args = args->args != nullptr ? args->args->num_args : 0;
+ size_t read_buffer_length =
+ args->read_buffer != nullptr ? args->read_buffer->length : 0;
+ char* str;
+ gpr_asprintf(&str,
+ "{endpoint=%p, args=%p {size=%" PRIuPTR
+ ": %s}, read_buffer=%p (length=%" PRIuPTR "), exit_early=%d}",
+ args->endpoint, args->args, num_args, args_str,
+ args->read_buffer, read_buffer_length, args->exit_early);
+ gpr_free(args_str);
+ return str;
+}
+
// Helper function to call either the next handshaker or the
// on_handshake_done callback.
// Returns true if we've scheduled the on_handshake_done callback.
static bool call_next_handshaker_locked(grpc_handshake_manager* mgr,
grpc_error* error) {
+ if (grpc_handshaker_trace.enabled()) {
+ char* args_str = handshaker_args_string(&mgr->args);
+ gpr_log(GPR_INFO,
+ "handshake_manager %p: error=%s shutdown=%d index=%" PRIuPTR
+ ", args=%s",
+ mgr, grpc_error_string(error), mgr->shutdown, mgr->index, args_str);
+ gpr_free(args_str);
+ }
GPR_ASSERT(mgr->index <= mgr->count);
// If we got an error or we've been shut down or we're exiting early or
// we've finished the last handshaker, invoke the on_handshake_done
// callback. Otherwise, call the next handshaker.
if (error != GRPC_ERROR_NONE || mgr->shutdown || mgr->args.exit_early ||
mgr->index == mgr->count) {
+ if (grpc_handshaker_trace.enabled()) {
+ gpr_log(GPR_INFO, "handshake_manager %p: handshaking complete", mgr);
+ }
// Cancel deadline timer, since we're invoking the on_handshake_done
// callback now.
grpc_timer_cancel(&mgr->deadline_timer);
GRPC_CLOSURE_SCHED(&mgr->on_handshake_done, error);
mgr->shutdown = true;
} else {
+ if (grpc_handshaker_trace.enabled()) {
+ gpr_log(
+ GPR_INFO,
+ "handshake_manager %p: calling handshaker %s [%p] at index %" PRIuPTR,
+ mgr, grpc_handshaker_name(mgr->handshakers[mgr->index]),
+ mgr->handshakers[mgr->index], mgr->index);
+ }
grpc_handshaker_do_handshake(mgr->handshakers[mgr->index], mgr->acceptor,
&mgr->call_next_handshaker, &mgr->args);
}
diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h
index dfecd81004..be7fd127e4 100644
--- a/src/core/lib/channel/handshaker.h
+++ b/src/core/lib/channel/handshaker.h
@@ -84,6 +84,9 @@ typedef struct {
grpc_tcp_server_acceptor* acceptor,
grpc_closure* on_handshake_done,
grpc_handshaker_args* args);
+
+ /// The name of the handshaker, for debugging purposes.
+ const char* name;
} grpc_handshaker_vtable;
/// Base struct. To subclass, make this the first member of the
@@ -102,6 +105,7 @@ void grpc_handshaker_do_handshake(grpc_handshaker* handshaker,
grpc_tcp_server_acceptor* acceptor,
grpc_closure* on_handshake_done,
grpc_handshaker_args* args);
+const char* grpc_handshaker_name(grpc_handshaker* handshaker);
///
/// grpc_handshake_manager
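With the vtable gaining a name field, each handshaker supplies one string that grpc_handshaker_name() feeds into the trace lines added above; the tracer itself is switched on at runtime with the usual GRPC_TRACE=handshaker environment variable. A sketch of a do-nothing handshaker, assuming the vtable's other members are destroy and shutdown as in grpc at this time (the stubs themselves are hypothetical):

    static void noop_destroy(grpc_handshaker* handshaker) {}
    static void noop_shutdown(grpc_handshaker* handshaker, grpc_error* why) {
      GRPC_ERROR_UNREF(why);
    }
    static void noop_do_handshake(grpc_handshaker* handshaker,
                                  grpc_tcp_server_acceptor* acceptor,
                                  grpc_closure* on_handshake_done,
                                  grpc_handshaker_args* args) {
      // Nothing to negotiate: report success immediately.
      GRPC_CLOSURE_SCHED(on_handshake_done, GRPC_ERROR_NONE);
    }
    static const grpc_handshaker_vtable noop_vtable = {
        noop_destroy, noop_shutdown, noop_do_handshake,
        "noop"  // appears in the "calling handshaker %s [%p]" trace output
    };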
diff --git a/src/core/lib/debug/trace.h b/src/core/lib/debug/trace.h
index bfec92c529..28157c6383 100644
--- a/src/core/lib/debug/trace.h
+++ b/src/core/lib/debug/trace.h
@@ -57,13 +57,22 @@ class TraceFlag {
const char* name() const { return name_; }
+// The following define may be uncommented to ensure that the compiler
+// deletes any "if (tracer.enabled()) {...}" code blocks. This is useful
+// for testing the performance impact tracers have on the system.
+//
+// #define COMPILE_OUT_ALL_TRACERS_IN_OPT_BUILD
+#ifdef COMPILE_OUT_ALL_TRACERS_IN_OPT_BUILD
+ bool enabled() { return false; }
+#else
bool enabled() {
#ifdef GRPC_THREADSAFE_TRACER
return gpr_atm_no_barrier_load(&value_) != 0;
#else
return value_;
-#endif
+#endif // GRPC_THREADSAFE_TRACER
}
+#endif // COMPILE_OUT_ALL_TRACERS_IN_OPT_BUILD
private:
friend void grpc_core::testing::grpc_tracer_enable_flag(TraceFlag* flag);
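What the compile-out switch buys, concretely: with COMPILE_OUT_ALL_TRACERS_IN_OPT_BUILD defined, enabled() is a constant false, so guarded blocks like the sketch below (the tracer is real; the logged values are stand-ins) become dead code the optimizer removes, along with any work done to build the log arguments:

    if (grpc_http_trace.enabled()) {  // constant false -> branch deleted
      gpr_log(GPR_INFO, "transport %p: wrote %d bytes", t, bytes_written);
    }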
diff --git a/src/core/lib/gprpp/orphanable.h b/src/core/lib/gprpp/orphanable.h
index a5bc8d8efc..73a73995c7 100644
--- a/src/core/lib/gprpp/orphanable.h
+++ b/src/core/lib/gprpp/orphanable.h
@@ -100,7 +100,7 @@ class InternallyRefCounted : public Orphanable {
void Unref() {
if (gpr_unref(&refs_)) {
- Delete(this);
+ Delete(static_cast<Child*>(this));
}
}
@@ -159,7 +159,7 @@ class InternallyRefCountedWithTracing : public Orphanable {
const char* reason) GRPC_MUST_USE_RESULT {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
- gpr_log(GPR_DEBUG, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
+ gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs + 1, reason);
}
@@ -173,14 +173,14 @@ class InternallyRefCountedWithTracing : public Orphanable {
void Unref() {
if (gpr_unref(&refs_)) {
- Delete(this);
+ Delete(static_cast<Child*>(this));
}
}
void Unref(const DebugLocation& location, const char* reason) {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
- gpr_log(GPR_DEBUG, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
+ gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs - 1, reason);
}
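The static_cast<Child*> fixes here, and in ref_counted.h below, matter because Delete<T> deduces T from its argument and these classes have non-virtual destructors. A minimal sketch of the bug being fixed, with stand-in types and allocator:

    #include <cstdlib>

    template <typename T>
    void Delete(T* p) {
      p->~T();  // runs exactly T's destructor: there is no virtual dispatch
      free(p);  // stand-in for gpr_free()
    }

    template <typename Child>
    class RefCountedBase {
     public:
      void Unref() {
        if (--refs_ == 0) {
          // Delete(this) would deduce T = RefCountedBase<Child> and destroy
          // only the base subobject, leaking everything Child owns. The
          // cast makes T = Child, so the full derived destructor runs.
          Delete(static_cast<Child*>(this));
        }
      }
     protected:
      ~RefCountedBase() = default;
     private:
      int refs_ = 1;
    };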
diff --git a/src/core/lib/gprpp/ref_counted.h b/src/core/lib/gprpp/ref_counted.h
index 46bfaf7fb8..c67e3f315c 100644
--- a/src/core/lib/gprpp/ref_counted.h
+++ b/src/core/lib/gprpp/ref_counted.h
@@ -54,7 +54,7 @@ class RefCounted {
// friend of this class.
void Unref() {
if (gpr_unref(&refs_)) {
- Delete(this);
+ Delete(static_cast<Child*>(this));
}
}
@@ -100,7 +100,7 @@ class RefCountedWithTracing {
const char* reason) GRPC_MUST_USE_RESULT {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
- gpr_log(GPR_DEBUG, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
+ gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs + 1, reason);
}
@@ -114,14 +114,14 @@ class RefCountedWithTracing {
void Unref() {
if (gpr_unref(&refs_)) {
- Delete(this);
+ Delete(static_cast<Child*>(this));
}
}
void Unref(const DebugLocation& location, const char* reason) {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
- gpr_log(GPR_DEBUG, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
+ gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs - 1, reason);
}
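Given the format strings above, a call site of the tracing overloads would look roughly like this (Subchannel is illustrative, and this assumes, as in later gRPC versions, that Ref() returns a RefCountedPtr to satisfy GRPC_MUST_USE_RESULT):

  // Logs at INFO, e.g. "subchannel:0x55e8... foo.cc:42 ref 1 -> 2 connect"
  grpc_core::RefCountedPtr<Subchannel> self =
      subchannel->Ref(DEBUG_LOCATION, "connect");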
diff --git a/src/core/lib/iomgr/call_combiner.cc b/src/core/lib/iomgr/call_combiner.cc
index 24e11b687b..00a839b64c 100644
--- a/src/core/lib/iomgr/call_combiner.cc
+++ b/src/core/lib/iomgr/call_combiner.cc
@@ -64,7 +64,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
const char* reason) {
GPR_TIMER_SCOPE("call_combiner_start", 0);
if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR
"%s] error=%s",
call_combiner, closure DEBUG_FMT_ARGS, reason,
@@ -73,7 +73,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
size_t prev_size = static_cast<size_t>(
gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1));
if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
+ gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size + 1);
}
GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS();
@@ -82,7 +82,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
GPR_TIMER_MARK("call_combiner_initiate", 0);
if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY");
+ gpr_log(GPR_INFO, " EXECUTING IMMEDIATELY");
}
// Queue was empty, so execute this closure immediately.
GRPC_CLOSURE_SCHED(closure, error);
@@ -101,21 +101,21 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
const char* reason) {
GPR_TIMER_SCOPE("call_combiner_stop", 0);
if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]",
call_combiner DEBUG_FMT_ARGS, reason);
}
size_t prev_size = static_cast<size_t>(
gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1));
if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
+ gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size - 1);
}
GPR_ASSERT(prev_size >= 1);
if (prev_size > 1) {
while (true) {
if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG, " checking queue");
+ gpr_log(GPR_INFO, " checking queue");
}
bool empty;
grpc_closure* closure = reinterpret_cast<grpc_closure*>(
@@ -124,19 +124,19 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
// This can happen either due to a race condition within the mpscq
// code or because of a race with grpc_call_combiner_start().
if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG, " queue returned no result; checking again");
+ gpr_log(GPR_INFO, " queue returned no result; checking again");
}
continue;
}
if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s",
+ gpr_log(GPR_INFO, " EXECUTING FROM QUEUE: closure=%p error=%s",
closure, grpc_error_string(closure->error_data.error));
}
GRPC_CLOSURE_SCHED(closure, closure->error_data.error);
break;
}
} else if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG, " queue empty");
+ gpr_log(GPR_INFO, " queue empty");
}
}
@@ -151,7 +151,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
// Otherwise, store the new closure.
if (original_error != GRPC_ERROR_NONE) {
if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p "
"for pre-existing cancellation",
call_combiner, closure);
@@ -162,7 +162,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
(gpr_atm)closure)) {
if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG, "call_combiner=%p: setting notify_on_cancel=%p",
+ gpr_log(GPR_INFO, "call_combiner=%p: setting notify_on_cancel=%p",
call_combiner, closure);
}
// If we replaced an earlier closure, invoke the original
@@ -171,7 +171,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
if (original_state != 0) {
closure = (grpc_closure*)original_state;
if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"call_combiner=%p: scheduling old cancel callback=%p",
call_combiner, closure);
}
@@ -199,7 +199,7 @@ void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner,
if (original_state != 0) {
grpc_closure* notify_on_cancel = (grpc_closure*)original_state;
if (grpc_call_combiner_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p",
call_combiner, notify_on_cancel);
}
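A plausible motivation for the blanket GPR_DEBUG -> GPR_INFO switch in this change: gpr drops messages below the configured verbosity, so tracer output logged at DEBUG was invisible unless GRPC_VERBOSITY=debug was set, which also floods the log with unrelated debug output. With tracers logging at INFO, enabling a single tracer is just (binary name hypothetical):

  GRPC_VERBOSITY=info GRPC_TRACE=call_combiner ./my_grpc_app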
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index 64527d6bb1..34a494485d 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -253,8 +253,8 @@ inline void grpc_closure_run(grpc_closure* c, grpc_error* error) {
c->file_initiated = file;
c->line_initiated = line;
c->run = true;
+ GPR_ASSERT(c->cb != nullptr);
#endif
- assert(c->cb);
c->scheduler->vtable->run(c, error);
} else {
GRPC_ERROR_UNREF(error);
@@ -292,8 +292,8 @@ inline void grpc_closure_sched(grpc_closure* c, grpc_error* error) {
c->file_initiated = file;
c->line_initiated = line;
c->run = false;
+ GPR_ASSERT(c->cb != nullptr);
#endif
- assert(c->cb);
c->scheduler->vtable->sched(c, error);
} else {
GRPC_ERROR_UNREF(error);
@@ -330,8 +330,8 @@ inline void grpc_closure_list_sched(grpc_closure_list* list) {
c->file_initiated = file;
c->line_initiated = line;
c->run = false;
+ GPR_ASSERT(c->cb != nullptr);
#endif
- assert(c->cb);
c->scheduler->vtable->sched(c, c->error_data.error);
c = next;
}
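Unlike assert(), which compiles to nothing when NDEBUG is defined, GPR_ASSERT always evaluates its argument and reports through gRPC's own logger, so it is now placed explicitly inside the #ifndef NDEBUG block. Its effect is roughly this (a sketch, not the exact definition):

  #define GPR_ASSERT(x)                                 \
    do {                                                \
      if (!(x)) {                                       \
        gpr_log(GPR_ERROR, "assertion failed: %s", #x); \
        abort();                                        \
      }                                                 \
    } while (0)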
diff --git a/src/core/lib/iomgr/combiner.cc b/src/core/lib/iomgr/combiner.cc
index e66df03182..9429842eb8 100644
--- a/src/core/lib/iomgr/combiner.cc
+++ b/src/core/lib/iomgr/combiner.cc
@@ -83,12 +83,12 @@ grpc_combiner* grpc_combiner_create(void) {
grpc_closure_list_init(&lock->final_list);
GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
- GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
+ GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p create", lock));
return lock;
}
static void really_destroy(grpc_combiner* lock) {
- GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock));
+ GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p really_destroy", lock));
GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
gpr_mpscq_destroy(&lock->queue);
gpr_free(lock);
@@ -97,7 +97,7 @@ static void really_destroy(grpc_combiner* lock) {
static void start_destroy(grpc_combiner* lock) {
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
GRPC_COMBINER_TRACE(gpr_log(
- GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
+ GPR_INFO, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
if (old_state == 1) {
really_destroy(lock);
}
@@ -159,7 +159,7 @@ static void combiner_exec(grpc_closure* cl, grpc_error* error) {
GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS();
grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
- GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
+ GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
"C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
lock, cl, last));
if (last == 1) {
@@ -203,7 +203,7 @@ static void offload(void* arg, grpc_error* error) {
static void queue_offload(grpc_combiner* lock) {
GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED();
move_next();
- GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
+ GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p queue_offload", lock));
GRPC_CLOSURE_SCHED(&lock->offload, GRPC_ERROR_NONE);
}
@@ -218,7 +218,7 @@ bool grpc_combiner_continue_exec_ctx() {
bool contended =
gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null) == 0;
- GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
+ GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
"C:%p grpc_combiner_continue_exec_ctx "
"contended=%d "
"exec_ctx_ready_to_finish=%d "
@@ -242,7 +242,7 @@ bool grpc_combiner_continue_exec_ctx() {
(gpr_atm_acq_load(&lock->state) >> 1) > 1) {
gpr_mpscq_node* n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
+ gpr_log(GPR_INFO, "C:%p maybe_finish_one n=%p", lock, n));
if (n == nullptr) {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
@@ -266,7 +266,7 @@ bool grpc_combiner_continue_exec_ctx() {
while (c != nullptr) {
GPR_TIMER_SCOPE("combiner.exec_1final", 0);
GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
+ gpr_log(GPR_INFO, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_closure* next = c->next_data.next;
grpc_error* error = c->error_data.error;
#ifndef NDEBUG
@@ -284,7 +284,7 @@ bool grpc_combiner_continue_exec_ctx() {
gpr_atm old_state =
gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
+ gpr_log(GPR_INFO, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
// Define a macro to ease readability of the following switch statement.
#define OLD_STATE_WAS(orphaned, elem_count) \
(((orphaned) ? 0 : STATE_UNORPHANED) | \
@@ -327,8 +327,8 @@ static void combiner_finally_exec(grpc_closure* closure, grpc_error* error) {
grpc_combiner* lock =
COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
GRPC_COMBINER_TRACE(gpr_log(
- GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock,
- closure, grpc_core::ExecCtx::Get()->combiner_data()->active_combiner));
+ GPR_INFO, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock, closure,
+ grpc_core::ExecCtx::Get()->combiner_data()->active_combiner));
if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(enqueue_finally, closure,
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 3ebaf181c1..e5db1be0e0 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -658,7 +658,7 @@ static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
+ gpr_log(GPR_INFO, "ps: %p poll got %d events", ps, r);
}
gpr_atm_rel_store(&g_epoll_set.num_events, r);
@@ -678,7 +678,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
pollset->begin_refs++;
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p BEGIN_STARTS:%p", pollset, worker);
+ gpr_log(GPR_INFO, "PS:%p BEGIN_STARTS:%p", pollset, worker);
}
if (pollset->seen_inactive) {
@@ -697,7 +697,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
+ gpr_log(GPR_INFO, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
pollset, worker, kick_state_string(worker->state),
is_reassigning);
}
@@ -749,7 +749,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_cv_init(&worker->cv);
while (worker->state == UNKICKED && !pollset->shutting_down) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
+ gpr_log(GPR_INFO, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
pollset, worker, kick_state_string(worker->state),
pollset->shutting_down);
}
@@ -766,7 +766,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
}
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
"kicked_without_poller: %d",
pollset, worker, kick_state_string(worker->state),
@@ -809,7 +809,7 @@ static bool check_neighborhood_for_available_poller(
if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. choose next poller to be %p",
+ gpr_log(GPR_INFO, " .. choose next poller to be %p",
inspect_worker);
}
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
@@ -820,7 +820,7 @@ static bool check_neighborhood_for_available_poller(
}
} else {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. beaten to choose next poller");
+ gpr_log(GPR_INFO, " .. beaten to choose next poller");
}
}
// even if we didn't win the cas, there's a worker, we can stop
@@ -838,7 +838,7 @@ static bool check_neighborhood_for_available_poller(
}
if (!found_worker) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
+ gpr_log(GPR_INFO, " .. mark pollset %p inactive", inspect);
}
inspect->seen_inactive = true;
if (inspect == neighborhood->active_root) {
@@ -858,7 +858,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
grpc_pollset_worker** worker_hdl) {
GPR_TIMER_SCOPE("end_worker", 0);
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
+ gpr_log(GPR_INFO, "PS:%p END_WORKER:%p", pollset, worker);
}
if (worker_hdl != nullptr) *worker_hdl = nullptr;
/* Make sure we appear kicked */
@@ -868,7 +868,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
if (worker->next != worker && worker->next->state == UNKICKED) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
+ gpr_log(GPR_INFO, " .. choose next poller to be peer %p", worker);
}
GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
@@ -920,7 +920,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_cv_destroy(&worker->cv);
}
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. remove worker");
+ gpr_log(GPR_INFO, " .. remove worker");
}
if (EMPTIED == worker_remove(pollset, worker)) {
pollset_maybe_finish_shutdown(pollset);
@@ -1022,7 +1022,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
pollset->kicked_without_poller = true;
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. kicked_without_poller");
+ gpr_log(GPR_INFO, " .. kicked_without_poller");
}
goto done;
}
@@ -1030,14 +1030,14 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (root_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. already kicked %p", root_worker);
+ gpr_log(GPR_INFO, " .. already kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
goto done;
} else if (next_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. already kicked %p", next_worker);
+ gpr_log(GPR_INFO, " .. already kicked %p", next_worker);
}
SET_KICK_STATE(next_worker, KICKED);
goto done;
@@ -1048,7 +1048,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
&g_active_poller)) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. kicked %p", root_worker);
+ gpr_log(GPR_INFO, " .. kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
@@ -1056,7 +1056,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else if (next_worker->state == UNKICKED) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. kicked %p", next_worker);
+ gpr_log(GPR_INFO, " .. kicked %p", next_worker);
}
GPR_ASSERT(next_worker->initialized_cv);
SET_KICK_STATE(next_worker, KICKED);
@@ -1066,7 +1066,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (root_worker->state != DESIGNATED_POLLER) {
if (grpc_polling_trace.enabled()) {
gpr_log(
- GPR_DEBUG,
+ GPR_INFO,
" .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
root_worker, root_worker->initialized_cv, next_worker);
}
@@ -1079,7 +1079,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. non-root poller %p (root=%p)", next_worker,
+ gpr_log(GPR_INFO, " .. non-root poller %p (root=%p)", next_worker,
root_worker);
}
SET_KICK_STATE(next_worker, KICKED);
@@ -1095,7 +1095,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. kicked while waking up");
+ gpr_log(GPR_INFO, " .. kicked while waking up");
}
goto done;
}
@@ -1105,14 +1105,14 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (specific_worker->state == KICKED) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. specific worker already kicked");
+ gpr_log(GPR_INFO, " .. specific worker already kicked");
}
goto done;
} else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. mark %p kicked", specific_worker);
+ gpr_log(GPR_INFO, " .. mark %p kicked", specific_worker);
}
SET_KICK_STATE(specific_worker, KICKED);
goto done;
@@ -1120,7 +1120,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
(grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. kick active poller");
+ gpr_log(GPR_INFO, " .. kick active poller");
}
SET_KICK_STATE(specific_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
@@ -1128,7 +1128,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else if (specific_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. kick waiting worker");
+ gpr_log(GPR_INFO, " .. kick waiting worker");
}
SET_KICK_STATE(specific_worker, KICKED);
gpr_cv_signal(&specific_worker->cv);
@@ -1136,7 +1136,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. kick non-waiting worker");
+ gpr_log(GPR_INFO, " .. kick non-waiting worker");
}
SET_KICK_STATE(specific_worker, KICKED);
goto done;
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index 44d8cf2b1e..65f1c912af 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -518,7 +518,7 @@ static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
const int epfd = p->epfd;
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
+ gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
}
struct epoll_event ev_fd;
@@ -560,7 +560,7 @@ static void pollset_global_shutdown(void) {
/* pollset->mu must be held while calling this function */
static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
"rw=%p (target:NULL) cpsc=%d (target:0)",
pollset, pollset->active_pollable, pollset->shutdown_closure,
@@ -585,14 +585,14 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
GPR_ASSERT(specific_worker != nullptr);
if (specific_worker->kicked) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_already_kicked", p);
+ gpr_log(GPR_INFO, "PS:%p kicked_specific_but_already_kicked", p);
}
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
return GRPC_ERROR_NONE;
}
if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_awake", p);
+ gpr_log(GPR_INFO, "PS:%p kicked_specific_but_awake", p);
}
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
specific_worker->kicked = true;
@@ -601,7 +601,7 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
if (specific_worker == p->root_worker) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_wakeup_fd", p);
+ gpr_log(GPR_INFO, "PS:%p kicked_specific_via_wakeup_fd", p);
}
specific_worker->kicked = true;
grpc_error* error = grpc_wakeup_fd_wakeup(&p->wakeup);
@@ -610,7 +610,7 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
if (specific_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_cv", p);
+ gpr_log(GPR_INFO, "PS:%p kicked_specific_via_cv", p);
}
specific_worker->kicked = true;
gpr_cv_signal(&specific_worker->cv);
@@ -626,7 +626,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
GPR_TIMER_SCOPE("pollset_kick", 0);
GRPC_STATS_INC_POLLSET_KICK();
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
pollset, specific_worker,
(void*)gpr_tls_get(&g_current_thread_pollset),
@@ -636,7 +636,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
if (pollset->root_worker == nullptr) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_any_without_poller", pollset);
+ gpr_log(GPR_INFO, "PS:%p kicked_any_without_poller", pollset);
}
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
pollset->kicked_without_poller = true;
@@ -662,7 +662,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
}
} else {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_any_but_awake", pollset);
+ gpr_log(GPR_INFO, "PS:%p kicked_any_but_awake", pollset);
}
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
return GRPC_ERROR_NONE;
@@ -784,7 +784,7 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset,
void* data_ptr = ev->data.ptr;
if (1 & (intptr_t)data_ptr) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
+ gpr_log(GPR_INFO, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
}
append_error(&error,
grpc_wakeup_fd_consume_wakeup(
@@ -797,7 +797,7 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset,
bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
bool write_ev = (ev->events & EPOLLOUT) != 0;
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"PS:%p got fd %p: cancel=%d read=%d "
"write=%d",
pollset, fd, cancel, read_ev, write_ev);
@@ -827,7 +827,7 @@ static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
if (grpc_polling_trace.enabled()) {
char* desc = pollable_desc(p);
- gpr_log(GPR_DEBUG, "POLLABLE:%p[%s] poll for %dms", p, desc, timeout);
+ gpr_log(GPR_INFO, "POLLABLE:%p[%s] poll for %dms", p, desc, timeout);
gpr_free(desc);
}
@@ -846,7 +846,7 @@ static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "POLLABLE:%p got %d events", p, r);
+ gpr_log(GPR_INFO, "POLLABLE:%p got %d events", p, r);
}
p->event_cursor = 0;
@@ -917,7 +917,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_mu_unlock(&pollset->mu);
if (grpc_polling_trace.enabled() &&
worker->pollable_obj->root_worker != worker) {
- gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
+ gpr_log(GPR_INFO, "PS:%p wait %p w=%p for %dms", pollset,
worker->pollable_obj, worker,
poll_deadline_to_millis_timeout(deadline));
}
@@ -925,19 +925,19 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
+ gpr_log(GPR_INFO, "PS:%p timeout_wait %p w=%p", pollset,
worker->pollable_obj, worker);
}
do_poll = false;
} else if (worker->kicked) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset,
+ gpr_log(GPR_INFO, "PS:%p wakeup %p w=%p", pollset,
worker->pollable_obj, worker);
}
do_poll = false;
} else if (grpc_polling_trace.enabled() &&
worker->pollable_obj->root_worker != worker) {
- gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
+ gpr_log(GPR_INFO, "PS:%p spurious_wakeup %p w=%p", pollset,
worker->pollable_obj, worker);
}
}
@@ -1009,7 +1009,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
WORKER_PTR->originator = gettid();
#endif
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"PS:%p work hdl=%p worker=%p now=%" PRIdPTR " deadline=%" PRIdPTR
" kwp=%d pollable=%p",
pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
@@ -1050,7 +1050,7 @@ static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked(
static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd";
grpc_error* error = GRPC_ERROR_NONE;
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"PS:%p add fd %p (%d); transition pollable from empty to fd",
pollset, fd, fd->fd);
}
@@ -1067,7 +1067,7 @@ static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked(
grpc_error* error = GRPC_ERROR_NONE;
if (grpc_polling_trace.enabled()) {
gpr_log(
- GPR_DEBUG,
+ GPR_INFO,
"PS:%p add fd %p (%d); transition pollable from fd %p to multipoller",
pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
pollset->active_pollable->owner_fd);
@@ -1137,7 +1137,7 @@ static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
/* Any workers currently polling on this pollset must now be woken up so
* that they can pick up the new active_pollable */
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"PS:%p active pollable transition from empty to multi",
pollset);
}
@@ -1224,7 +1224,7 @@ static void pollset_set_unref(grpc_pollset_set* pss) {
static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
GPR_TIMER_SCOPE("pollset_set_add_fd", 0);
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
+ gpr_log(GPR_INFO, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
}
grpc_error* error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_set_add_fd";
@@ -1248,7 +1248,7 @@ static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
GPR_TIMER_SCOPE("pollset_set_del_fd", 0);
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PSS:%p: del fd %p", pss, fd);
+ gpr_log(GPR_INFO, "PSS:%p: del fd %p", pss, fd);
}
pss = pss_lock_adam(pss);
size_t i;
@@ -1269,7 +1269,7 @@ static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
GPR_TIMER_SCOPE("pollset_set_del_pollset", 0);
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PSS:%p: del pollset %p", pss, ps);
+ gpr_log(GPR_INFO, "PSS:%p: del pollset %p", pss, ps);
}
pss = pss_lock_adam(pss);
size_t i;
@@ -1321,7 +1321,7 @@ static grpc_error* add_fds_to_pollsets(grpc_fd** fds, size_t fd_count,
static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
GPR_TIMER_SCOPE("pollset_set_add_pollset", 0);
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PSS:%p: add pollset %p", pss, ps);
+ gpr_log(GPR_INFO, "PSS:%p: add pollset %p", pss, ps);
}
grpc_error* error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_set_add_pollset";
@@ -1358,7 +1358,7 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* a,
grpc_pollset_set* b) {
GPR_TIMER_SCOPE("pollset_set_add_pollset_set", 0);
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PSS: merge (%p, %p)", a, b);
+ gpr_log(GPR_INFO, "PSS: merge (%p, %p)", a, b);
}
grpc_error* error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_set_add_fd";
@@ -1392,7 +1392,7 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* a,
GPR_SWAP(grpc_pollset_set*, a, b);
}
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "PSS: parent %p to %p", b, a);
+ gpr_log(GPR_INFO, "PSS: parent %p to %p", b, a);
}
gpr_ref(&a->refs);
b->parent = a;
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc
index 1e30f6637b..494bc71c1d 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.cc
+++ b/src/core/lib/iomgr/ev_epollsig_linux.cc
@@ -292,7 +292,7 @@ static void pi_add_ref_dbg(polling_island* pi, const char* reason,
const char* file, int line) {
if (grpc_polling_trace.enabled()) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
" (%s) - (%s, %d)",
pi, old_cnt, old_cnt + 1, reason, file, line);
@@ -304,7 +304,7 @@ static void pi_unref_dbg(polling_island* pi, const char* reason,
const char* file, int line) {
if (grpc_polling_trace.enabled()) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
" (%s) - (%s, %d)",
pi, old_cnt, (old_cnt - 1), reason, file, line);
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index d9aba9b6a3..504787e659 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -983,7 +983,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
GRPC_SCHEDULING_END_BLOCKING_REGION;
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r);
+ gpr_log(GPR_INFO, "%p poll=%d", pollset, r);
}
if (r < 0) {
@@ -1007,7 +1007,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
} else {
if (pfds[0].revents & POLLIN_CHECK) {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%p: got_wakeup", pollset);
+ gpr_log(GPR_INFO, "%p: got_wakeup", pollset);
}
work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
@@ -1017,7 +1017,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
fd_end_poll(&watchers[i], 0, 0, nullptr);
} else {
if (grpc_polling_trace.enabled()) {
- gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset,
+ gpr_log(GPR_INFO, "%p got_event: %d r:%d w:%d [%d]", pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
}
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index 8b80070265..4ea63fc6e8 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -46,9 +46,9 @@ grpc_core::DebugOnlyTraceFlag grpc_polling_api_trace(false, "polling_api");
#ifndef NDEBUG
// Polling API trace only enabled in debug builds
-#define GRPC_POLLING_API_TRACE(format, ...) \
- if (grpc_polling_api_trace.enabled()) { \
- gpr_log(GPR_DEBUG, "(polling-api) " format, __VA_ARGS__); \
+#define GRPC_POLLING_API_TRACE(format, ...) \
+ if (grpc_polling_api_trace.enabled()) { \
+ gpr_log(GPR_INFO, "(polling-api) " format, __VA_ARGS__); \
}
#else
#define GRPC_POLLING_API_TRACE(...)
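In debug builds each polling API entry point can then be traced with a single line, while in opt builds the macro and its arguments vanish entirely; a hypothetical call site:

  GRPC_POLLING_API_TRACE("pollset_kick(%p)", pollset);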
diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc
index b017db53f8..f19f8cf20d 100644
--- a/src/core/lib/iomgr/executor.cc
+++ b/src/core/lib/iomgr/executor.cc
@@ -69,7 +69,7 @@ static size_t run_closures(grpc_closure_list list) {
gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
c->file_created, c->line_created);
#else
- gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
+ gpr_log(GPR_INFO, "EXECUTOR: run %p", c);
#endif
}
#ifndef NDEBUG
@@ -150,7 +150,7 @@ static void executor_thread(void* arg) {
size_t subtract_depth = 0;
for (;;) {
if (executor_trace.enabled()) {
- gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
+ gpr_log(GPR_INFO, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
static_cast<int>(ts - g_thread_state), subtract_depth);
}
gpr_mu_lock(&ts->mu);
@@ -161,7 +161,7 @@ static void executor_thread(void* arg) {
}
if (ts->shutdown) {
if (executor_trace.enabled()) {
- gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown",
+ gpr_log(GPR_INFO, "EXECUTOR[%d]: shutdown",
static_cast<int>(ts - g_thread_state));
}
gpr_mu_unlock(&ts->mu);
@@ -172,7 +172,7 @@ static void executor_thread(void* arg) {
ts->elems = GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
if (executor_trace.enabled()) {
- gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute",
+ gpr_log(GPR_INFO, "EXECUTOR[%d]: execute",
static_cast<int>(ts - g_thread_state));
}
@@ -199,7 +199,7 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
closure, closure->file_created, closure->line_created);
#else
- gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure);
+ gpr_log(GPR_INFO, "EXECUTOR: schedule %p inline", closure);
#endif
}
grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
@@ -225,7 +225,7 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
closure, is_short ? "short" : "long", closure->file_created,
closure->line_created, static_cast<int>(ts - g_thread_state));
#else
- gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d",
+ gpr_log(GPR_INFO, "EXECUTOR: try to schedule %p (%s) to thread %d",
closure, is_short ? "short" : "long",
(int)(ts - g_thread_state));
#endif
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index 8c42dd78cf..8cf4fe9928 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -289,7 +289,7 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
GRPC_RULIST_AWAITING_ALLOCATION))) {
gpr_mu_lock(&resource_user->mu);
if (grpc_resource_quota_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"RQ: check allocation for user %p shutdown=%" PRIdPTR
" free_pool=%" PRId64,
resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown),
@@ -315,7 +315,7 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
resource_quota->free_pool -= amt;
rq_update_estimate(resource_quota);
if (grpc_resource_quota_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"RQ %s %s: grant alloc %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
@@ -323,7 +323,7 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
}
} else if (grpc_resource_quota_trace.enabled() &&
resource_user->free_pool >= 0) {
- gpr_log(GPR_DEBUG, "RQ %s %s: discard already satisfied alloc request",
+ gpr_log(GPR_INFO, "RQ %s %s: discard already satisfied alloc request",
resource_quota->name, resource_user->name);
}
if (resource_user->free_pool >= 0) {
@@ -353,7 +353,7 @@ static bool rq_reclaim_from_per_user_free_pool(
resource_quota->free_pool += amt;
rq_update_estimate(resource_quota);
if (grpc_resource_quota_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
@@ -376,9 +376,8 @@ static bool rq_reclaim(grpc_resource_quota* resource_quota, bool destructive) {
grpc_resource_user* resource_user = rulist_pop_head(resource_quota, list);
if (resource_user == nullptr) return false;
if (grpc_resource_quota_trace.enabled()) {
- gpr_log(GPR_DEBUG, "RQ %s %s: initiate %s reclamation",
- resource_quota->name, resource_user->name,
- destructive ? "destructive" : "benign");
+ gpr_log(GPR_INFO, "RQ %s %s: initiate %s reclamation", resource_quota->name,
+ resource_user->name, destructive ? "destructive" : "benign");
}
resource_quota->reclaiming = true;
grpc_resource_quota_ref_internal(resource_quota);
@@ -506,7 +505,7 @@ static void ru_post_destructive_reclaimer(void* ru, grpc_error* error) {
static void ru_shutdown(void* ru, grpc_error* error) {
if (grpc_resource_quota_trace.enabled()) {
- gpr_log(GPR_DEBUG, "RU shutdown %p", ru);
+ gpr_log(GPR_INFO, "RU shutdown %p", ru);
}
grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
gpr_mu_lock(&resource_user->mu);
@@ -793,7 +792,7 @@ void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
resource_user->free_pool -= static_cast<int64_t>(size);
resource_user->outstanding_allocations += static_cast<int64_t>(size);
if (grpc_resource_quota_trace.enabled()) {
- gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
+ gpr_log(GPR_INFO, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
resource_user->free_pool);
}
@@ -816,7 +815,7 @@ void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) {
bool was_zero_or_negative = resource_user->free_pool <= 0;
resource_user->free_pool += static_cast<int64_t>(size);
if (grpc_resource_quota_trace.enabled()) {
- gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64,
+ gpr_log(GPR_INFO, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
resource_user->free_pool);
}
@@ -842,7 +841,7 @@ void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user,
void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user) {
if (grpc_resource_quota_trace.enabled()) {
- gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
+ gpr_log(GPR_INFO, "RQ %s %s: reclamation complete",
resource_user->resource_quota->name, resource_user->name);
}
GRPC_CLOSURE_SCHED(
diff --git a/src/core/lib/iomgr/tcp_client_custom.cc b/src/core/lib/iomgr/tcp_client_custom.cc
index 55632a55a1..932c79ea0b 100644
--- a/src/core/lib/iomgr/tcp_client_custom.cc
+++ b/src/core/lib/iomgr/tcp_client_custom.cc
@@ -66,7 +66,7 @@ static void on_alarm(void* acp, grpc_error* error) {
grpc_custom_tcp_connect* connect = socket->connector;
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
+ gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_alarm: error=%s",
connect->addr_name, str);
}
if (error == GRPC_ERROR_NONE) {
@@ -136,7 +136,7 @@ static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
connect->refs = 2;
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %p %s: asynchronously connecting",
+ gpr_log(GPR_INFO, "CLIENT_CONNECT: %p %s: asynchronously connecting",
socket, connect->addr_name);
}
diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc
index 9f19b833da..6144d389f7 100644
--- a/src/core/lib/iomgr/tcp_client_posix.cc
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -104,7 +104,7 @@ static void tc_on_alarm(void* acp, grpc_error* error) {
async_connect* ac = static_cast<async_connect*>(acp);
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
+ gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
str);
}
gpr_mu_lock(&ac->mu);
@@ -141,8 +141,8 @@ static void on_writable(void* acp, grpc_error* error) {
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_writable: error=%s",
- ac->addr_str, str);
+ gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_writable: error=%s", ac->addr_str,
+ str);
}
gpr_mu_lock(&ac->mu);
@@ -325,7 +325,7 @@ void grpc_tcp_client_create_from_prepared_fd(
ac->channel_args = grpc_channel_args_copy(channel_args);
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting fd %p",
+ gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: asynchronously connecting fd %p",
ac->addr_str, fdobj);
}
diff --git a/src/core/lib/iomgr/tcp_custom.cc b/src/core/lib/iomgr/tcp_custom.cc
index 2b1fc93028..b3b2934014 100644
--- a/src/core/lib/iomgr/tcp_custom.cc
+++ b/src/core/lib/iomgr/tcp_custom.cc
@@ -125,16 +125,16 @@ static void tcp_ref(custom_tcp_endpoint* tcp) { gpr_ref(&tcp->refcount); }
static void call_read_cb(custom_tcp_endpoint* tcp, grpc_error* error) {
grpc_closure* cb = tcp->read_cb;
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp->socket, cb, cb->cb,
+ gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp->socket, cb, cb->cb,
cb->cb_arg);
size_t i;
const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "read: error=%s", str);
+ gpr_log(GPR_INFO, "read: error=%s", str);
for (i = 0; i < tcp->read_slices->count; i++) {
char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
- gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
+ gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
gpr_free(dump);
}
}
@@ -171,7 +171,7 @@ static void custom_read_callback(grpc_custom_socket* socket, size_t nread,
static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)tcpp;
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp->socket,
+ gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp->socket,
grpc_error_string(error));
}
if (error == GRPC_ERROR_NONE) {
@@ -188,7 +188,7 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
}
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "Initiating read on %p: error=%s", tcp->socket, str);
+ gpr_log(GPR_INFO, "Initiating read on %p: error=%s", tcp->socket, str);
}
}
@@ -214,7 +214,7 @@ static void custom_write_callback(grpc_custom_socket* socket,
tcp->write_cb = nullptr;
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp->socket, str);
+ gpr_log(GPR_INFO, "write complete on %p: error=%s", tcp->socket, str);
}
TCP_UNREF(tcp, "write");
GRPC_CLOSURE_SCHED(cb, error);
@@ -231,8 +231,8 @@ static void endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* write_slices,
for (j = 0; j < write_slices->count; j++) {
char* data = grpc_dump_slice(write_slices->slices[j],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
- gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp->socket,
- tcp->peer_string, data);
+ gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp->socket, tcp->peer_string,
+ data);
gpr_free(data);
}
}
@@ -283,7 +283,7 @@ static void endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
if (!tcp->shutting_down) {
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(why);
- gpr_log(GPR_DEBUG, "TCP %p shutdown why=%s", tcp->socket, str);
+ gpr_log(GPR_INFO, "TCP %p shutdown why=%s", tcp->socket, str);
}
tcp->shutting_down = true;
// GRPC_CLOSURE_SCHED(tcp->read_cb, GRPC_ERROR_REF(why));
@@ -345,7 +345,7 @@ grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
grpc_core::ExecCtx exec_ctx;
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", socket);
+ gpr_log(GPR_INFO, "Creating TCP endpoint %p", socket);
}
memset(tcp, 0, sizeof(custom_tcp_endpoint));
socket->refs++;
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index 205af22531..153be05e83 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -120,7 +120,7 @@ static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
static void done_poller(void* bp, grpc_error* error_ignored) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
+ gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
}
grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
gpr_free(p);
@@ -129,7 +129,7 @@ static void done_poller(void* bp, grpc_error* error_ignored) {
static void run_poller(void* bp, grpc_error* error_ignored) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
+ gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p);
}
gpr_mu_lock(p->pollset_mu);
grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC;
@@ -145,18 +145,18 @@ static void run_poller(void* bp, grpc_error* error_ignored) {
gpr_mu_lock(p->pollset_mu);
bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
+ gpr_log(GPR_INFO, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
}
gpr_mu_unlock(p->pollset_mu);
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p);
+ gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
}
grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
grpc_schedule_on_exec_ctx));
} else {
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p);
+ gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
}
GRPC_CLOSURE_SCHED(&p->run_poller, GRPC_ERROR_NONE);
}
@@ -167,7 +167,7 @@ static void drop_uncovered(grpc_tcp* tcp) {
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p,
+ gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p,
static_cast<int>(old_count), static_cast<int>(old_count) - 1);
}
GPR_ASSERT(old_count != 1);
@@ -178,7 +178,7 @@ static void cover_self(grpc_tcp* tcp) {
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d",
+ gpr_log(GPR_INFO, "BACKUP_POLLER: cover cnt %d->%d",
static_cast<int>(old_count), 2 + static_cast<int>(old_count));
}
if (old_count == 0) {
@@ -186,7 +186,7 @@ static void cover_self(grpc_tcp* tcp) {
p = static_cast<backup_poller*>(
gpr_zalloc(sizeof(*p) + grpc_pollset_size()));
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
+ gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
}
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
@@ -201,7 +201,7 @@ static void cover_self(grpc_tcp* tcp) {
}
}
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp);
+ gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p", p, tcp);
}
grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
if (old_count != 0) {
@@ -211,7 +211,7 @@ static void cover_self(grpc_tcp* tcp) {
static void notify_on_read(grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
+ gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
}
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
@@ -220,7 +220,7 @@ static void notify_on_read(grpc_tcp* tcp) {
static void notify_on_write(grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp);
+ gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
}
cover_self(tcp);
GRPC_CLOSURE_INIT(&tcp->write_done_closure,
@@ -231,7 +231,7 @@ static void notify_on_write(grpc_tcp* tcp) {
static void tcp_drop_uncovered_then_handle_write(void* arg, grpc_error* error) {
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
+ gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg, grpc_error_string(error));
}
drop_uncovered(static_cast<grpc_tcp*>(arg));
tcp_handle_write(arg, error);
@@ -351,15 +351,15 @@ static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
grpc_closure* cb = tcp->read_cb;
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
+ gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
size_t i;
const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "read: error=%s", str);
+ gpr_log(GPR_INFO, "read: error=%s", str);
for (i = 0; i < tcp->incoming_buffer->count; i++) {
char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
- gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
+ gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
gpr_free(dump);
}
}
@@ -371,7 +371,7 @@ static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
#define MAX_READ_IOVEC 4
static void tcp_do_read(grpc_tcp* tcp) {
- GPR_TIMER_SCOPE("tcp_continue_read", 0);
+ GPR_TIMER_SCOPE("tcp_do_read", 0);
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
ssize_t read_bytes;
@@ -441,7 +441,7 @@ static void tcp_do_read(grpc_tcp* tcp) {
static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(tcpp);
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
+ gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp,
grpc_error_string(error));
}
if (error != GRPC_ERROR_NONE) {
@@ -459,13 +459,13 @@ static void tcp_continue_read(grpc_tcp* tcp) {
if (tcp->incoming_buffer->length < target_read_size &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp);
+ gpr_log(GPR_INFO, "TCP:%p alloc_slices", tcp);
}
grpc_resource_user_alloc_slices(&tcp->slice_allocator, target_read_size, 1,
tcp->incoming_buffer);
} else {
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp);
+ gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
}
tcp_do_read(tcp);
}
@@ -475,7 +475,7 @@ static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
GPR_ASSERT(!tcp->finished_edge);
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
+ gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
}
if (error != GRPC_ERROR_NONE) {
@@ -618,7 +618,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
if (!tcp_flush(tcp, &error)) {
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "write: delayed");
+ gpr_log(GPR_INFO, "write: delayed");
}
notify_on_write(tcp);
} else {
@@ -626,7 +626,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
tcp->write_cb = nullptr;
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "write: %s", str);
+ gpr_log(GPR_INFO, "write: %s", str);
}
GRPC_CLOSURE_RUN(cb, error);
@@ -646,7 +646,7 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
for (i = 0; i < buf->count; i++) {
char* data =
grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
- gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
+ gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
gpr_free(data);
}
}
@@ -668,13 +668,13 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
TCP_REF(tcp, "write");
tcp->write_cb = cb;
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "write: delayed");
+ gpr_log(GPR_INFO, "write: delayed");
}
notify_on_write(tcp);
} else {
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "write: %s", str);
+ gpr_log(GPR_INFO, "write: %s", str);
}
GRPC_CLOSURE_SCHED(cb, error);
}
diff --git a/src/core/lib/iomgr/tcp_server_custom.cc b/src/core/lib/iomgr/tcp_server_custom.cc
index 79ba5c39ee..019b354473 100644
--- a/src/core/lib/iomgr/tcp_server_custom.cc
+++ b/src/core/lib/iomgr/tcp_server_custom.cc
@@ -222,10 +222,10 @@ static void finish_accept(grpc_tcp_listener* sp, grpc_custom_socket* socket) {
}
if (grpc_tcp_trace.enabled()) {
if (peer_name_string) {
- gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection: %s",
+ gpr_log(GPR_INFO, "SERVER_CONNECT: %p accepted connection: %s",
sp->server, peer_name_string);
} else {
- gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection", sp->server);
+ gpr_log(GPR_INFO, "SERVER_CONNECT: %p accepted connection", sp->server);
}
}
ep = custom_tcp_endpoint_create(socket, sp->server->resource_quota,
@@ -377,10 +377,10 @@ static grpc_error* tcp_server_add_port(grpc_tcp_server* s,
grpc_sockaddr_to_string(&port_string, addr, 0);
const char* str = grpc_error_string(error);
if (port_string) {
- gpr_log(GPR_DEBUG, "SERVER %p add_port %s error=%s", s, port_string, str);
+ gpr_log(GPR_INFO, "SERVER %p add_port %s error=%s", s, port_string, str);
gpr_free(port_string);
} else {
- gpr_log(GPR_DEBUG, "SERVER %p add_port error=%s", s, str);
+ gpr_log(GPR_INFO, "SERVER %p add_port error=%s", s, str);
}
}
@@ -419,7 +419,7 @@ static void tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets,
(void)pollset_count;
GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "SERVER_START %p", server);
+ gpr_log(GPR_INFO, "SERVER_START %p", server);
}
GPR_ASSERT(on_accept_cb);
GPR_ASSERT(!server->on_accept_cb);
diff --git a/src/core/lib/iomgr/tcp_server_posix.cc b/src/core/lib/iomgr/tcp_server_posix.cc
index f11b82f7ab..524beba9ab 100644
--- a/src/core/lib/iomgr/tcp_server_posix.cc
+++ b/src/core/lib/iomgr/tcp_server_posix.cc
@@ -228,7 +228,7 @@ static void on_read(void* arg, grpc_error* err) {
gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
+ gpr_log(GPR_INFO, "SERVER_CONNECT: incoming connection: %s", addr_str);
}
grpc_fd* fdobj = grpc_fd_create(fd, name);
diff --git a/src/core/lib/iomgr/tcp_uv.cc b/src/core/lib/iomgr/tcp_uv.cc
index f20f8dcb74..8d0e4a5e79 100644
--- a/src/core/lib/iomgr/tcp_uv.cc
+++ b/src/core/lib/iomgr/tcp_uv.cc
@@ -204,6 +204,9 @@ static grpc_error* uv_socket_init_helper(uv_socket_t* uv_socket, int domain) {
uv_socket->write_buffers = nullptr;
uv_socket->read_len = 0;
uv_tcp_nodelay(uv_socket->handle, 1);
+ // Node uses a garbage collector to call destructors, so we don't
+ // want to hold the uv loop open with active gRPC objects.
+ uv_unref((uv_handle_t*)uv_socket->handle);
uv_socket->pending_connection = false;
uv_socket->accept_socket = nullptr;
uv_socket->accept_error = GRPC_ERROR_NONE;
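uv_unref() removes the handle from the loop's active-handle count, so uv_run() may return while the socket object is still alive; that is what lets a garbage-collected runtime exit without first closing every gRPC handle. A standalone libuv illustration of that semantic:

  #include <uv.h>

  int main(void) {
    uv_tcp_t handle;
    uv_loop_t* loop = uv_default_loop();
    uv_tcp_init(loop, &handle);
    uv_unref((uv_handle_t*)&handle);      // no longer keeps the loop alive
    return uv_run(loop, UV_RUN_DEFAULT);  // returns at once: nothing active
  }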
diff --git a/src/core/lib/iomgr/tcp_windows.cc b/src/core/lib/iomgr/tcp_windows.cc
index 04e6f11eee..5d316d477b 100644
--- a/src/core/lib/iomgr/tcp_windows.cc
+++ b/src/core/lib/iomgr/tcp_windows.cc
@@ -74,12 +74,28 @@ static grpc_error* set_dualstack(SOCKET sock) {
: GRPC_WSA_ERROR(WSAGetLastError(), "setsockopt(IPV6_V6ONLY)");
}
+static grpc_error* enable_loopback_fast_path(SOCKET sock) {
+ int status;
+ uint32_t param = 1;
+ DWORD ret;
+ status = WSAIoctl(sock, /*SIO_LOOPBACK_FAST_PATH==*/_WSAIOW(IOC_VENDOR, 16),
+ &param, sizeof(param), NULL, 0, &ret, 0, 0);
+ if (status == SOCKET_ERROR) {
+ status = WSAGetLastError();
+ }
+ return status == 0 || status == WSAEOPNOTSUPP
+ ? GRPC_ERROR_NONE
+ : GRPC_WSA_ERROR(status, "WSAIoctl(SIO_LOOPBACK_FAST_PATH)");
+}
+
grpc_error* grpc_tcp_prepare_socket(SOCKET sock) {
grpc_error* err;
err = set_non_block(sock);
if (err != GRPC_ERROR_NONE) return err;
err = set_dualstack(sock);
if (err != GRPC_ERROR_NONE) return err;
+ err = enable_loopback_fast_path(sock);
+ if (err != GRPC_ERROR_NONE) return err;
return GRPC_ERROR_NONE;
}
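SIO_LOOPBACK_FAST_PATH enables the TCP Loopback Fast Path available on Windows 8 / Server 2012 and later; spelling it as _WSAIOW(IOC_VENDOR, 16), the documented value of that control code, keeps the file building against SDKs whose headers predate the symbol, and WSAEOPNOTSUPP from older kernels is deliberately tolerated. A sanity check against a modern SDK (assuming mstcpip.h is available) would be:

  #include <mstcpip.h>
  static_assert(SIO_LOOPBACK_FAST_PATH == _WSAIOW(IOC_VENDOR, 16),
                "literal must match the SDK's control code");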
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index 0c6f236f83..de2256f7cb 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -346,9 +346,9 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
#endif
if (grpc_timer_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer,
- deadline, grpc_core::ExecCtx::Get()->Now(), closure, closure->cb);
+ gpr_log(GPR_INFO, "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]",
+ timer, deadline, grpc_core::ExecCtx::Get()->Now(), closure,
+ closure->cb);
}
if (!g_shared_mutables.initialized) {
@@ -382,7 +382,7 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
list_join(&shard->list, timer);
}
if (grpc_timer_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
" .. add to shard %d with queue_deadline_cap=%" PRIdPTR
" => is_first_timer=%s",
static_cast<int>(shard - g_shards), shard->queue_deadline_cap,
@@ -404,7 +404,7 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
if (is_first_timer) {
gpr_mu_lock(&g_shared_mutables.mu);
if (grpc_timer_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR,
+ gpr_log(GPR_INFO, " .. old shard min_deadline=%" PRIdPTR,
shard->min_deadline);
}
if (deadline < shard->min_deadline) {
@@ -434,7 +434,7 @@ static void timer_cancel(grpc_timer* timer) {
timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
gpr_mu_lock(&shard->mu);
if (grpc_timer_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
+ gpr_log(GPR_INFO, "TIMER %p: CANCEL pending=%s", timer,
timer->pending ? "true" : "false");
}
@@ -475,7 +475,7 @@ static int refill_heap(timer_shard* shard, gpr_atm now) {
static_cast<gpr_atm>(deadline_delta * 1000.0));
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
+ gpr_log(GPR_INFO, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
static_cast<int>(shard - g_shards), shard->queue_deadline_cap);
}
for (timer = shard->list.next; timer != &shard->list; timer = next) {
@@ -483,7 +483,7 @@ static int refill_heap(timer_shard* shard, gpr_atm now) {
if (timer->deadline < shard->queue_deadline_cap) {
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRIdPTR " to heap",
+ gpr_log(GPR_INFO, " .. add timer with deadline %" PRIdPTR " to heap",
timer->deadline);
}
list_remove(timer);
@@ -500,7 +500,7 @@ static grpc_timer* pop_one(timer_shard* shard, gpr_atm now) {
grpc_timer* timer;
for (;;) {
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s",
+ gpr_log(GPR_INFO, " .. shard[%d]: heap_empty=%s",
static_cast<int>(shard - g_shards),
grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
}
@@ -510,13 +510,13 @@ static grpc_timer* pop_one(timer_shard* shard, gpr_atm now) {
}
timer = grpc_timer_heap_top(&shard->heap);
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
" .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR,
timer->deadline, now);
}
if (timer->deadline > now) return nullptr;
if (grpc_timer_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler",
+ gpr_log(GPR_INFO, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler",
timer, now - timer->deadline,
timer->closure->scheduler->vtable->name);
}
@@ -540,7 +540,7 @@ static size_t pop_timers(timer_shard* shard, gpr_atm now,
*new_min_deadline = compute_min_deadline(shard);
gpr_mu_unlock(&shard->mu);
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. shard[%d] popped %" PRIdPTR,
+ gpr_log(GPR_INFO, " .. shard[%d] popped %" PRIdPTR,
static_cast<int>(shard - g_shards), n);
}
return n;
@@ -563,7 +563,7 @@ static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
result = GRPC_TIMERS_CHECKED_AND_EMPTY;
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRIdPTR,
+ gpr_log(GPR_INFO, " .. shard[%d]->min_deadline = %" PRIdPTR,
static_cast<int>(g_shard_queue[0] - g_shards),
g_shard_queue[0]->min_deadline);
}
@@ -580,7 +580,7 @@ static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
}
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
" .. result --> %d"
", shard[%d]->min_deadline %" PRIdPTR " --> %" PRIdPTR
", now=%" PRIdPTR,
@@ -624,7 +624,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
*next = GPR_MIN(*next, min_timer);
}
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"TIMER CHECK SKIP: now=%" PRIdPTR " min_timer=%" PRIdPTR, now,
min_timer);
}
@@ -644,7 +644,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
} else {
gpr_asprintf(&next_str, "%" PRIdPTR, *next);
}
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"TIMER CHECK BEGIN: now=%" PRIdPTR " next=%s tls_min=%" PRIdPTR
" glob_min=%" PRIdPTR,
now, next_str, gpr_tls_get(&g_last_seen_min_timer),
@@ -662,7 +662,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
} else {
gpr_asprintf(&next_str, "%" PRIdPTR, *next);
}
- gpr_log(GPR_DEBUG, "TIMER CHECK END: r=%d; next=%s", r, next_str);
+ gpr_log(GPR_INFO, "TIMER CHECK END: r=%d; next=%s", r, next_str);
gpr_free(next_str);
}
return r;
diff --git a/src/core/lib/iomgr/timer_manager.cc b/src/core/lib/iomgr/timer_manager.cc
index 94f288af27..35e7914568 100644
--- a/src/core/lib/iomgr/timer_manager.cc
+++ b/src/core/lib/iomgr/timer_manager.cc
@@ -82,7 +82,7 @@ static void start_timer_thread_and_unlock(void) {
++g_thread_count;
gpr_mu_unlock(&g_mu);
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, "Spawn timer thread");
+ gpr_log(GPR_INFO, "Spawn timer thread");
}
completed_thread* ct =
static_cast<completed_thread*>(gpr_malloc(sizeof(*ct)));
@@ -108,7 +108,7 @@ static void run_some_timers() {
// waiter so that the next deadline is not missed
if (!g_has_timed_waiter) {
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, "kick untimed waiter");
+ gpr_log(GPR_INFO, "kick untimed waiter");
}
gpr_cv_signal(&g_cv_wait);
}
@@ -116,7 +116,7 @@ static void run_some_timers() {
}
// without our lock, flush the exec_ctx
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, "flush exec_ctx");
+ gpr_log(GPR_INFO, "flush exec_ctx");
}
grpc_core::ExecCtx::Get()->Flush();
gpr_mu_lock(&g_mu);
@@ -172,8 +172,7 @@ static bool wait_until(grpc_millis next) {
if (grpc_timer_check_trace.enabled()) {
grpc_millis wait_time = next - grpc_core::ExecCtx::Get()->Now();
-      gpr_log(GPR_DEBUG, "sleep for a %" PRIdPTR " milliseconds",
-              wait_time);
+      gpr_log(GPR_INFO, "sleep for %" PRIdPTR " milliseconds", wait_time);
}
} else { // g_timed_waiter == true && next >= g_timed_waiter_deadline
next = GRPC_MILLIS_INF_FUTURE;
@@ -181,14 +180,14 @@ static bool wait_until(grpc_millis next) {
}
if (grpc_timer_check_trace.enabled() && next == GRPC_MILLIS_INF_FUTURE) {
- gpr_log(GPR_DEBUG, "sleep until kicked");
+ gpr_log(GPR_INFO, "sleep until kicked");
}
gpr_cv_wait(&g_cv_wait, &g_mu,
grpc_millis_to_timespec(next, GPR_CLOCK_MONOTONIC));
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d",
+ gpr_log(GPR_INFO, "wait ended: was_timed:%d kicked:%d",
my_timed_waiter_generation == g_timed_waiter_generation,
g_kicked);
}
@@ -233,7 +232,7 @@ static void timer_main_loop() {
          Consequently, we can just sleep forever here and be happy with the
          saved wakeup cycles. */
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, "timers not checked: expect another thread to");
+ gpr_log(GPR_INFO, "timers not checked: expect another thread to");
}
next = GRPC_MILLIS_INF_FUTURE;
/* fall through */
@@ -259,7 +258,7 @@ static void timer_thread_cleanup(completed_thread* ct) {
g_completed_threads = ct;
gpr_mu_unlock(&g_mu);
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, "End timer thread");
+ gpr_log(GPR_INFO, "End timer thread");
}
}
@@ -301,18 +300,18 @@ void grpc_timer_manager_init(void) {
static void stop_threads(void) {
gpr_mu_lock(&g_mu);
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, "stop timer threads: threaded=%d", g_threaded);
+ gpr_log(GPR_INFO, "stop timer threads: threaded=%d", g_threaded);
}
if (g_threaded) {
g_threaded = false;
gpr_cv_broadcast(&g_cv_wait);
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
+ gpr_log(GPR_INFO, "num timer threads: %d", g_thread_count);
}
while (g_thread_count > 0) {
gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
+ gpr_log(GPR_INFO, "num timer threads: %d", g_thread_count);
}
gc_completed_threads();
}
diff --git a/src/core/lib/iomgr/timer_uv.cc b/src/core/lib/iomgr/timer_uv.cc
index dadeb960b2..8b7c82eb7d 100644
--- a/src/core/lib/iomgr/timer_uv.cc
+++ b/src/core/lib/iomgr/timer_uv.cc
@@ -52,6 +52,9 @@ static void timer_start(grpc_custom_timer* t) {
uv_timer->data = t;
t->timer = (void*)uv_timer;
uv_timer_start(uv_timer, run_expired_timer, t->timeout_ms, 0);
+ // Node uses a garbage collector to call destructors, so we don't
+ // want to hold the uv loop open with active gRPC objects.
+ uv_unref((uv_handle_t*)uv_timer);
}
static void timer_stop(grpc_custom_timer* t) {
diff --git a/src/core/lib/profiling/basic_timers.cc b/src/core/lib/profiling/basic_timers.cc
index 652a498b6e..b19ad9fc23 100644
--- a/src/core/lib/profiling/basic_timers.cc
+++ b/src/core/lib/profiling/basic_timers.cc
@@ -27,6 +27,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include <inttypes.h>
+#include <pthread.h>
#include <stdio.h>
#include <string.h>
diff --git a/src/core/lib/security/security_connector/security_connector.cc b/src/core/lib/security/security_connector/security_connector.cc
index 3551061aa4..6eae30a6e5 100644
--- a/src/core/lib/security/security_connector/security_connector.cc
+++ b/src/core/lib/security/security_connector/security_connector.cc
@@ -786,23 +786,26 @@ static void ssl_server_add_handshakers(grpc_server_security_connector* sc,
tsi_create_adapter_handshaker(tsi_hs), &sc->base));
}
-static int ssl_host_matches_name(const tsi_peer* peer, const char* peer_name) {
+int grpc_ssl_host_matches_name(const tsi_peer* peer, const char* peer_name) {
char* allocated_name = nullptr;
int r;
- if (strchr(peer_name, ':') != nullptr) {
- char* ignored_port;
- gpr_split_host_port(peer_name, &allocated_name, &ignored_port);
- gpr_free(ignored_port);
- peer_name = allocated_name;
- if (!peer_name) return 0;
- }
+ char* ignored_port;
+ gpr_split_host_port(peer_name, &allocated_name, &ignored_port);
+ gpr_free(ignored_port);
+ peer_name = allocated_name;
+ if (!peer_name) return 0;
+
+ // IPv6 zone-id should not be included in comparisons.
+ char* const zone_id = strchr(allocated_name, '%');
+ if (zone_id != nullptr) *zone_id = '\0';
+
r = tsi_ssl_peer_matches_name(peer, peer_name);
gpr_free(allocated_name);
return r;
}
-grpc_auth_context* tsi_ssl_peer_to_auth_context(const tsi_peer* peer) {
+grpc_auth_context* grpc_ssl_peer_to_auth_context(const tsi_peer* peer) {
size_t i;
grpc_auth_context* ctx = nullptr;
const char* peer_identity_property_name = nullptr;
@@ -859,14 +862,14 @@ static grpc_error* ssl_check_peer(grpc_security_connector* sc,
}
/* Check the peer name if specified. */
- if (peer_name != nullptr && !ssl_host_matches_name(peer, peer_name)) {
+ if (peer_name != nullptr && !grpc_ssl_host_matches_name(peer, peer_name)) {
char* msg;
gpr_asprintf(&msg, "Peer name %s is not in peer certificate", peer_name);
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return error;
}
- *auth_context = tsi_ssl_peer_to_auth_context(peer);
+ *auth_context = grpc_ssl_peer_to_auth_context(peer);
return GRPC_ERROR_NONE;
}
@@ -924,7 +927,7 @@ static void add_shallow_auth_property_to_peer(tsi_peer* peer,
tsi_prop->value.length = prop->value_length;
}
-tsi_peer tsi_shallow_peer_from_ssl_auth_context(
+tsi_peer grpc_shallow_peer_from_ssl_auth_context(
const grpc_auth_context* auth_context) {
size_t max_num_props = 0;
grpc_auth_property_iterator it;
@@ -955,7 +958,7 @@ tsi_peer tsi_shallow_peer_from_ssl_auth_context(
return peer;
}
-void tsi_shallow_peer_destruct(tsi_peer* peer) {
+void grpc_shallow_peer_destruct(tsi_peer* peer) {
if (peer->properties != nullptr) gpr_free(peer->properties);
}
@@ -967,8 +970,8 @@ static bool ssl_channel_check_call_host(grpc_channel_security_connector* sc,
grpc_ssl_channel_security_connector* c =
reinterpret_cast<grpc_ssl_channel_security_connector*>(sc);
grpc_security_status status = GRPC_SECURITY_ERROR;
- tsi_peer peer = tsi_shallow_peer_from_ssl_auth_context(auth_context);
- if (ssl_host_matches_name(&peer, host)) status = GRPC_SECURITY_OK;
+ tsi_peer peer = grpc_shallow_peer_from_ssl_auth_context(auth_context);
+ if (grpc_ssl_host_matches_name(&peer, host)) status = GRPC_SECURITY_OK;
/* If the target name was overridden, then the original target_name was
'checked' transitively during the previous peer check at the end of the
handshake. */
@@ -980,7 +983,7 @@ static bool ssl_channel_check_call_host(grpc_channel_security_connector* sc,
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"call host does not match SSL server name");
}
- tsi_shallow_peer_destruct(&peer);
+ grpc_shallow_peer_destruct(&peer);
return true;
}
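The net effect of the grpc_ssl_host_matches_name() change above: the port is now stripped unconditionally rather than only when a ':' is present, and an IPv6 zone-id never participates in certificate matching. Illustrative inputs and the name that would reach tsi_ssl_peer_matches_name(), assuming gpr_split_host_port()'s usual treatment of bare IPv6 literals as host-only:

  //   "foo.test:443"        -> "foo.test"
  //   "foo.test"            -> "foo.test"
  //   "fe80::1%eth0"        -> "fe80::1"   (zone-id stripped)
  //   "[fe80::1%eth0]:443"  -> "fe80::1"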
diff --git a/src/core/lib/security/security_connector/security_connector.h b/src/core/lib/security/security_connector/security_connector.h
index c4cc19db81..f9723166d0 100644
--- a/src/core/lib/security/security_connector/security_connector.h
+++ b/src/core/lib/security/security_connector/security_connector.h
@@ -239,10 +239,11 @@ const tsi_peer_property* tsi_peer_get_property_by_name(const tsi_peer* peer,
const char* name);
/* Exposed for testing only. */
-grpc_auth_context* tsi_ssl_peer_to_auth_context(const tsi_peer* peer);
-tsi_peer tsi_shallow_peer_from_ssl_auth_context(
+grpc_auth_context* grpc_ssl_peer_to_auth_context(const tsi_peer* peer);
+tsi_peer grpc_shallow_peer_from_ssl_auth_context(
const grpc_auth_context* auth_context);
-void tsi_shallow_peer_destruct(tsi_peer* peer);
+void grpc_shallow_peer_destruct(tsi_peer* peer);
+int grpc_ssl_host_matches_name(const tsi_peer* peer, const char* peer_name);
/* --- Default SSL Root Store. --- */
namespace grpc_core {
diff --git a/src/core/lib/security/transport/secure_endpoint.cc b/src/core/lib/security/transport/secure_endpoint.cc
index 31b779e333..840b2e73bc 100644
--- a/src/core/lib/security/transport/secure_endpoint.cc
+++ b/src/core/lib/security/transport/secure_endpoint.cc
@@ -133,7 +133,7 @@ static void call_read_cb(secure_endpoint* ep, grpc_error* error) {
for (i = 0; i < ep->read_buffer->count; i++) {
char* data = grpc_dump_slice(ep->read_buffer->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
- gpr_log(GPR_DEBUG, "READ %p: %s", ep, data);
+ gpr_log(GPR_INFO, "READ %p: %s", ep, data);
gpr_free(data);
}
}
@@ -269,7 +269,7 @@ static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
for (i = 0; i < slices->count; i++) {
char* data =
grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
- gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
+ gpr_log(GPR_INFO, "WRITE %p: %s", ep, data);
gpr_free(data);
}
}
diff --git a/src/core/lib/security/transport/security_handshaker.cc b/src/core/lib/security/transport/security_handshaker.cc
index 0c97dfa6b3..d9ba3483e6 100644
--- a/src/core/lib/security/transport/security_handshaker.cc
+++ b/src/core/lib/security/transport/security_handshaker.cc
@@ -232,6 +232,10 @@ static grpc_error* on_handshake_next_done_locked(
const unsigned char* bytes_to_send, size_t bytes_to_send_size,
tsi_handshaker_result* handshaker_result) {
grpc_error* error = GRPC_ERROR_NONE;
+  // Handshaker was shut down.
+ if (h->shutdown) {
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshaker shutdown");
+ }
// Read more if we need to.
if (result == TSI_INCOMPLETE_DATA) {
GPR_ASSERT(bytes_to_send_size == 0);
@@ -406,7 +410,7 @@ static void security_handshaker_do_handshake(grpc_handshaker* handshaker,
static const grpc_handshaker_vtable security_handshaker_vtable = {
security_handshaker_destroy, security_handshaker_shutdown,
- security_handshaker_do_handshake};
+ security_handshaker_do_handshake, "security"};
static grpc_handshaker* security_handshaker_create(
tsi_handshaker* handshaker, grpc_security_connector* connector) {
@@ -456,7 +460,7 @@ static void fail_handshaker_do_handshake(grpc_handshaker* handshaker,
static const grpc_handshaker_vtable fail_handshaker_vtable = {
fail_handshaker_destroy, fail_handshaker_shutdown,
- fail_handshaker_do_handshake};
+ fail_handshaker_do_handshake, "security_fail"};
static grpc_handshaker* fail_handshaker_create() {
grpc_handshaker* h = static_cast<grpc_handshaker*>(gpr_malloc(sizeof(*h)));
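Both vtable initializers above gain a trailing string, implying grpc_handshaker_vtable (declared in src/core/lib/channel/handshaker.h, also touched by this change) now ends with a name used when tracing handshakers. A sketch of the assumed shape; the function-pointer signatures are inferred from call sites, not quoted from the header:

  typedef struct {
    void (*destroy)(grpc_handshaker* handshaker);
    void (*shutdown)(grpc_handshaker* handshaker, grpc_error* why);
    void (*do_handshake)(grpc_handshaker* handshaker,
                         grpc_tcp_server_acceptor* acceptor,
                         grpc_closure* on_handshake_done,
                         grpc_handshaker_args* args);
    const char* name;  // new: identifies the handshaker in trace output
  } grpc_handshaker_vtable;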
diff --git a/src/core/lib/slice/slice_hash_table.h b/src/core/lib/slice/slice_hash_table.h
index fbe9cc58e8..4bbcf88e89 100644
--- a/src/core/lib/slice/slice_hash_table.h
+++ b/src/core/lib/slice/slice_hash_table.h
@@ -81,6 +81,10 @@ class SliceHashTable : public RefCounted<SliceHashTable<T>> {
template <typename T2, typename... Args>
friend T2* New(Args&&... args);
+ // So Delete() can call our private dtor.
+ template <typename T2>
+ friend void Delete(T2*);
+
SliceHashTable(size_t num_entries, Entry* entries, ValueCmp value_cmp);
virtual ~SliceHashTable();
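The pattern behind this friend declaration (repeated below for SliceWeakHashTable and SslSessionLRUCache): these types keep their constructor and destructor private so instances can only be created via grpc_core::New<>() and destroyed via grpc_core::Delete() (which RefCounted::Unref() invokes internally), so both templates need friendship. A minimal sketch with a hypothetical type Widget:

  class Widget : public grpc_core::RefCounted<Widget> {
   private:
    // Only New() may construct, and only Delete() may destroy, a Widget.
    template <typename T, typename... Args>
    friend T* grpc_core::New(Args&&... args);
    template <typename T>
    friend void grpc_core::Delete(T*);

    Widget() = default;
    ~Widget() = default;
  };

  // Usage sketch: Unref() ends up calling grpc_core::Delete(this),
  // which is exactly what the friend declaration makes legal.
  //   Widget* w = grpc_core::New<Widget>();
  //   w->Unref();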
diff --git a/src/core/lib/slice/slice_weak_hash_table.h b/src/core/lib/slice/slice_weak_hash_table.h
index 9d0ddfc2d2..dc3ccc5dad 100644
--- a/src/core/lib/slice/slice_weak_hash_table.h
+++ b/src/core/lib/slice/slice_weak_hash_table.h
@@ -65,6 +65,10 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
template <typename T2, typename... Args>
friend T2* New(Args&&... args);
+ // So Delete() can call our private dtor.
+ template <typename T2>
+ friend void Delete(T2*);
+
SliceWeakHashTable() = default;
~SliceWeakHashTable() = default;
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index 9a9113643d..da488034ca 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -610,7 +610,7 @@ grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved) {
// This is called via the call combiner to start sending a batch down
// the filter stack.
static void execute_batch_in_call_combiner(void* arg, grpc_error* ignored) {
- GPR_TIMER_SCOPE("execute_batch", 0);
+ GPR_TIMER_SCOPE("execute_batch_in_call_combiner", 0);
grpc_transport_stream_op_batch* batch =
static_cast<grpc_transport_stream_op_batch*>(arg);
grpc_call* call = static_cast<grpc_call*>(batch->handler_private.extra_arg);
@@ -747,10 +747,10 @@ static void get_final_status(
status[i] = unpack_received_status(gpr_atm_acq_load(&call->status[i]));
}
if (grpc_call_error_trace.enabled()) {
- gpr_log(GPR_DEBUG, "get_final_status %s", call->is_client ? "CLI" : "SVR");
+ gpr_log(GPR_INFO, "get_final_status %s", call->is_client ? "CLI" : "SVR");
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (status[i].is_set) {
- gpr_log(GPR_DEBUG, " %d: %s", i, grpc_error_string(status[i].error));
+ gpr_log(GPR_INFO, " %d: %s", i, grpc_error_string(status[i].error));
}
}
}
@@ -1539,7 +1539,7 @@ static void free_no_op_completion(void* p, grpc_cq_completion* completion) {
static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
size_t nops, void* notify_tag,
int is_notify_tag_closure) {
- GPR_TIMER_SCOPE("grpc_call_start_batch", 0);
+ GPR_TIMER_SCOPE("call_start_batch", 0);
size_t i;
const grpc_op* op;
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index f7505c888e..cb34def740 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -1161,6 +1161,22 @@ static void listener_destroy_done(void* s, grpc_error* error) {
gpr_mu_unlock(&server->mu_global);
}
+/*
+ - Kills all pending requests-for-incoming-RPC-calls (i.e., the requests made
+ via grpc_server_request_call and grpc_server_request_registered_call will
+ now be cancelled). See 'kill_pending_work_locked()'.
+
+ - Shuts down the listeners (i.e., the server will no longer listen on the
+ port for new incoming channels).
+
+ - Iterates through all channels on the server and sends a shutdown message
+ (see 'channel_broadcaster_shutdown()' for details) to the clients via the
+ transport layer. The transport layer then guarantees the following:
+ -- Sends shutdown to the client (e.g., the HTTP2 transport sends GOAWAY).
+ -- If the server has outstanding calls in progress, the connection is NOT
+ closed until the server is done with all those calls.
+ -- Once there are no more calls in progress, the channel is closed.
+ */
void grpc_server_shutdown_and_notify(grpc_server* server,
grpc_completion_queue* cq, void* tag) {
listener* l;
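For orientation, a sketch of how a caller typically drives this sequence end to end (the tag value is arbitrary, and this assumes cq is a pluck-style completion queue):

  void* shutdown_tag = (void*)1;
  grpc_server_shutdown_and_notify(server, cq, shutdown_tag);
  // The tag only surfaces on cq after the steps described above complete.
  grpc_event ev = grpc_completion_queue_pluck(
      cq, shutdown_tag, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
  grpc_server_destroy(server);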
diff --git a/src/core/lib/transport/bdp_estimator.cc b/src/core/lib/transport/bdp_estimator.cc
index 8130535ddd..8e71f86989 100644
--- a/src/core/lib/transport/bdp_estimator.cc
+++ b/src/core/lib/transport/bdp_estimator.cc
@@ -47,7 +47,7 @@ grpc_millis BdpEstimator::CompletePing() {
double bw = dt > 0 ? (static_cast<double>(accumulator_) / dt) : 0;
int start_inter_ping_delay = inter_ping_delay_;
if (grpc_bdp_estimator_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
"bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
" dt=%lf bw=%lfMbs bw_est=%lfMbs",
name_, accumulator_, estimate_, dt, bw / 125000.0,
@@ -58,7 +58,7 @@ grpc_millis BdpEstimator::CompletePing() {
estimate_ = GPR_MAX(accumulator_, estimate_ * 2);
bw_est_ = bw;
if (grpc_bdp_estimator_trace.enabled()) {
- gpr_log(GPR_DEBUG, "bdp[%s]: estimate increased to %" PRId64, name_,
+ gpr_log(GPR_INFO, "bdp[%s]: estimate increased to %" PRId64, name_,
estimate_);
}
inter_ping_delay_ /= 2; // if the ping estimate changes,
@@ -75,7 +75,7 @@ grpc_millis BdpEstimator::CompletePing() {
if (start_inter_ping_delay != inter_ping_delay_) {
stable_estimate_count_ = 0;
if (grpc_bdp_estimator_trace.enabled()) {
- gpr_log(GPR_DEBUG, "bdp[%s]:update_inter_time to %dms", name_,
+ gpr_log(GPR_INFO, "bdp[%s]:update_inter_time to %dms", name_,
inter_ping_delay_);
}
}
diff --git a/src/core/lib/transport/bdp_estimator.h b/src/core/lib/transport/bdp_estimator.h
index e703af121c..ab13ae4be4 100644
--- a/src/core/lib/transport/bdp_estimator.h
+++ b/src/core/lib/transport/bdp_estimator.h
@@ -50,7 +50,7 @@ class BdpEstimator {
// transport (but not necessarily started)
void SchedulePing() {
if (grpc_bdp_estimator_trace.enabled()) {
- gpr_log(GPR_DEBUG, "bdp[%s]:sched acc=%" PRId64 " est=%" PRId64, name_,
+ gpr_log(GPR_INFO, "bdp[%s]:sched acc=%" PRId64 " est=%" PRId64, name_,
accumulator_, estimate_);
}
GPR_ASSERT(ping_state_ == PingState::UNSCHEDULED);
@@ -63,7 +63,7 @@ class BdpEstimator {
// the ping is on the wire
void StartPing() {
if (grpc_bdp_estimator_trace.enabled()) {
- gpr_log(GPR_DEBUG, "bdp[%s]:start acc=%" PRId64 " est=%" PRId64, name_,
+ gpr_log(GPR_INFO, "bdp[%s]:start acc=%" PRId64 " est=%" PRId64, name_,
accumulator_, estimate_);
}
GPR_ASSERT(ping_state_ == PingState::SCHEDULED);
diff --git a/src/core/lib/transport/connectivity_state.cc b/src/core/lib/transport/connectivity_state.cc
index 0122e773ca..db6b6c0444 100644
--- a/src/core/lib/transport/connectivity_state.cc
+++ b/src/core/lib/transport/connectivity_state.cc
@@ -78,7 +78,7 @@ grpc_connectivity_state grpc_connectivity_state_check(
grpc_connectivity_state cur = static_cast<grpc_connectivity_state>(
gpr_atm_no_barrier_load(&tracker->current_state_atm));
if (grpc_connectivity_state_trace.enabled()) {
- gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name,
+ gpr_log(GPR_INFO, "CONWATCH: %p %s: get %s", tracker, tracker->name,
grpc_connectivity_state_name(cur));
}
return cur;
@@ -89,7 +89,7 @@ grpc_connectivity_state grpc_connectivity_state_get(
grpc_connectivity_state cur = static_cast<grpc_connectivity_state>(
gpr_atm_no_barrier_load(&tracker->current_state_atm));
if (grpc_connectivity_state_trace.enabled()) {
- gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name,
+ gpr_log(GPR_INFO, "CONWATCH: %p %s: get %s", tracker, tracker->name,
grpc_connectivity_state_name(cur));
}
if (error != nullptr) {
@@ -110,10 +110,10 @@ bool grpc_connectivity_state_notify_on_state_change(
gpr_atm_no_barrier_load(&tracker->current_state_atm));
if (grpc_connectivity_state_trace.enabled()) {
if (current == nullptr) {
- gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker,
+ gpr_log(GPR_INFO, "CONWATCH: %p %s: unsubscribe notify=%p", tracker,
tracker->name, notify);
} else {
- gpr_log(GPR_DEBUG, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker,
+ gpr_log(GPR_INFO, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker,
tracker->name, grpc_connectivity_state_name(*current),
grpc_connectivity_state_name(cur), notify);
}
@@ -161,7 +161,7 @@ void grpc_connectivity_state_set(grpc_connectivity_state_tracker* tracker,
grpc_connectivity_state_watcher* w;
if (grpc_connectivity_state_trace.enabled()) {
const char* error_string = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker,
+ gpr_log(GPR_INFO, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker,
tracker->name, grpc_connectivity_state_name(cur),
grpc_connectivity_state_name(state), reason, error, error_string);
}
@@ -187,8 +187,7 @@ void grpc_connectivity_state_set(grpc_connectivity_state_tracker* tracker,
*w->current = state;
tracker->watchers = w->next;
if (grpc_connectivity_state_trace.enabled()) {
- gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
- w->notify);
+ gpr_log(GPR_INFO, "NOTIFY: %p %s: %p", tracker, tracker->name, w->notify);
}
GRPC_CLOSURE_SCHED(w->notify, GRPC_ERROR_REF(tracker->current_error));
gpr_free(w);
diff --git a/src/core/tsi/ssl/session_cache/ssl_session_cache.h b/src/core/tsi/ssl/session_cache/ssl_session_cache.h
index 488638c9bb..a90cca1a2e 100644
--- a/src/core/tsi/ssl/session_cache/ssl_session_cache.h
+++ b/src/core/tsi/ssl/session_cache/ssl_session_cache.h
@@ -69,6 +69,10 @@ class SslSessionLRUCache : public grpc_core::RefCounted<SslSessionLRUCache> {
template <typename T, typename... Args>
friend T* grpc_core::New(Args&&... args);
+ // So Delete() can call our private dtor.
+ template <typename T>
+ friend void grpc_core::Delete(T*);
+
class Node;
explicit SslSessionLRUCache(size_t capacity);