From 817d28fed6184053153831ab194891be882df138 Mon Sep 17 00:00:00 2001
From: "Mark D. Roth"
Date: Thu, 14 Jun 2018 09:44:58 -0700
Subject: Revert "Merge pull request #15746 from grpc/revert-15709-recv_trailing_metadata_ready2"

This reverts commit 3f9308ce1f8cb42c96901c1700f0b9dbb531f186, reversing
changes made to 92a0ae0b1081840d2c5a488f66bf6550c1a492f4.
---
 src/core/lib/channel/connected_channel.cc     |  9 ++-
 src/core/lib/gprpp/inlined_vector.h           |  2 +
 src/core/lib/iomgr/call_combiner.h            | 80 ++++++++++++++++++++++++++
 src/core/lib/iomgr/closure.h                  |  5 +-
 src/core/lib/surface/call.cc                  | 81 +++++++++++++++++++--------
 src/core/lib/transport/transport.cc           | 29 +++++++---
 src/core/lib/transport/transport.h            | 22 ++++----
 src/core/lib/transport/transport_op_string.cc |  7 ---
 8 files changed, 182 insertions(+), 53 deletions(-)

(limited to 'src/core/lib')

diff --git a/src/core/lib/channel/connected_channel.cc b/src/core/lib/channel/connected_channel.cc
index ddd3029402..e2ea334ded 100644
--- a/src/core/lib/channel/connected_channel.cc
+++ b/src/core/lib/channel/connected_channel.cc
@@ -51,6 +51,7 @@ typedef struct connected_channel_call_data {
   callback_state on_complete[6];  // Max number of pending batches.
   callback_state recv_initial_metadata_ready;
   callback_state recv_message_ready;
+  callback_state recv_trailing_metadata_ready;
 } call_data;
 
 static void run_in_call_combiner(void* arg, grpc_error* error) {
@@ -111,6 +112,12 @@ static void con_start_transport_stream_op_batch(
     intercept_callback(calld, state, false, "recv_message_ready",
                        &batch->payload->recv_message.recv_message_ready);
   }
+  if (batch->recv_trailing_metadata) {
+    callback_state* state = &calld->recv_trailing_metadata_ready;
+    intercept_callback(
+        calld, state, false, "recv_trailing_metadata_ready",
+        &batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready);
+  }
   if (batch->cancel_stream) {
     // There can be more than one cancellation batch in flight at any
     // given time, so we can't just pick out a fixed index into
@@ -121,7 +128,7 @@ static void con_start_transport_stream_op_batch(
         static_cast<callback_state*>(gpr_malloc(sizeof(*state)));
     intercept_callback(calld, state, true, "on_complete (cancel_stream)",
                        &batch->on_complete);
-  } else {
+  } else if (batch->on_complete != nullptr) {
     callback_state* state = get_state_for_batch(calld, batch);
     intercept_callback(calld, state, false, "on_complete", &batch->on_complete);
   }
diff --git a/src/core/lib/gprpp/inlined_vector.h b/src/core/lib/gprpp/inlined_vector.h
index f36f6cb706..0d2586e507 100644
--- a/src/core/lib/gprpp/inlined_vector.h
+++ b/src/core/lib/gprpp/inlined_vector.h
@@ -99,6 +99,8 @@ class InlinedVector {
   void push_back(T&& value) { emplace_back(std::move(value)); }
 
   size_t size() const { return size_; }
+  bool empty() const { return size_ == 0; }
+
   size_t capacity() const { return capacity_; }
 
   void clear() {
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
index 0ccd08ea57..641fa18082 100644
--- a/src/core/lib/iomgr/call_combiner.h
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -26,6 +26,7 @@
 #include <grpc/support/atm.h>
 
 #include "src/core/lib/gpr/mpscq.h"
+#include "src/core/lib/gprpp/inlined_vector.h"
 #include "src/core/lib/iomgr/closure.h"
 
 // A simple, lock-free mechanism for serializing activity related to a
@@ -109,4 +110,83 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
                                              grpc_error* error);
 void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner,
                                grpc_error* error);
 
+namespace grpc_core {
+
+// Helper for running a list of closures in a call combiner.
+//
+// Each callback running in the call combiner will eventually be
+// returned to the surface, at which point the surface will yield the
+// call combiner. So when we are running in the call combiner and have
+// more than one callback to return to the surface, we need to re-enter
+// the call combiner for all but one of those callbacks.
+class CallCombinerClosureList {
+ public:
+  CallCombinerClosureList() {}
+
+  // Adds a closure to the list. The closure must eventually result in
+  // the call combiner being yielded.
+  void Add(grpc_closure* closure, grpc_error* error, const char* reason) {
+    closures_.emplace_back(closure, error, reason);
+  }
+
+  // Runs all closures in the call combiner and yields the call combiner.
+  //
+  // All but one of the closures in the list will be scheduled via
+  // GRPC_CALL_COMBINER_START(), and the remaining closure will be
+  // scheduled via GRPC_CLOSURE_SCHED(), which will eventually result in
+  // yielding the call combiner. If the list is empty, then the call
+  // combiner will be yielded immediately.
+  void RunClosures(grpc_call_combiner* call_combiner) {
+    if (closures_.empty()) {
+      GRPC_CALL_COMBINER_STOP(call_combiner, "no closures to schedule");
+      return;
+    }
+    for (size_t i = 1; i < closures_.size(); ++i) {
+      auto& closure = closures_[i];
+      GRPC_CALL_COMBINER_START(call_combiner, closure.closure, closure.error,
+                               closure.reason);
+    }
+    if (grpc_call_combiner_trace.enabled()) {
+      gpr_log(GPR_INFO,
+              "CallCombinerClosureList executing closure while already "
+              "holding call_combiner %p: closure=%p error=%s reason=%s",
+              call_combiner, closures_[0].closure,
+              grpc_error_string(closures_[0].error), closures_[0].reason);
+    }
+    // This will release the call combiner.
+    GRPC_CLOSURE_SCHED(closures_[0].closure, closures_[0].error);
+    closures_.clear();
+  }
+
+  // Runs all closures in the call combiner, but does NOT yield the call
+  // combiner. All closures will be scheduled via GRPC_CALL_COMBINER_START().
+  void RunClosuresWithoutYielding(grpc_call_combiner* call_combiner) {
+    for (size_t i = 0; i < closures_.size(); ++i) {
+      auto& closure = closures_[i];
+      GRPC_CALL_COMBINER_START(call_combiner, closure.closure, closure.error,
+                               closure.reason);
+    }
+    closures_.clear();
+  }
+
+  size_t size() const { return closures_.size(); }
+
+ private:
+  struct CallCombinerClosure {
+    grpc_closure* closure;
+    grpc_error* error;
+    const char* reason;
+
+    CallCombinerClosure(grpc_closure* closure, grpc_error* error,
+                        const char* reason)
+        : closure(closure), error(error), reason(reason) {}
+  };
+
+  // There are generally a maximum of 6 closures to run in the call
+  // combiner, one for each pending op.
+  InlinedVector<CallCombinerClosure, 6> closures_;
+};
+
+}  // namespace grpc_core
+
 #endif /* GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H */
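
Note: a minimal, hedged sketch of how a filter might use the new CallCombinerClosureList to hand several intercepted recv callbacks back to the surface while yielding the call combiner exactly once. The helper name fail_recv_ops_locked() is hypothetical and not part of this patch; the fields, payload members, and macros it touches are the ones introduced or used elsewhere in this change, and the pattern mirrors grpc_transport_stream_op_batch_finish_with_failure() further below.

// Hypothetical usage sketch (illustration only, not part of this patch).
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/transport/transport.h"

static void fail_recv_ops_locked(grpc_transport_stream_op_batch* batch,
                                 grpc_error* error,
                                 grpc_call_combiner* call_combiner) {
  grpc_core::CallCombinerClosureList closures;
  if (batch->recv_initial_metadata) {
    closures.Add(
        batch->payload->recv_initial_metadata.recv_initial_metadata_ready,
        GRPC_ERROR_REF(error), "failing recv_initial_metadata_ready");
  }
  if (batch->recv_trailing_metadata) {
    closures.Add(
        batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready,
        GRPC_ERROR_REF(error), "failing recv_trailing_metadata_ready");
  }
  // All but one closure re-enters the call combiner via
  // GRPC_CALL_COMBINER_START(); the last one is scheduled with
  // GRPC_CLOSURE_SCHED() and eventually yields the combiner.  An empty
  // list yields the combiner immediately.
  closures.RunClosures(call_combiner);
  GRPC_ERROR_UNREF(error);
}
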
"true" : "false"); abort(); } c->scheduled = true; diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc index 1cf8ea94e7..8b224b6e7b 100644 --- a/src/core/lib/surface/call.cc +++ b/src/core/lib/surface/call.cc @@ -233,6 +233,7 @@ struct grpc_call { grpc_closure receiving_slice_ready; grpc_closure receiving_stream_ready; grpc_closure receiving_initial_metadata_ready; + grpc_closure receiving_trailing_metadata_ready; uint32_t test_only_last_message_flags; grpc_closure release_call; @@ -270,8 +271,17 @@ struct grpc_call { grpc_core::TraceFlag grpc_call_error_trace(false, "call_error"); grpc_core::TraceFlag grpc_compression_trace(false, "compression"); -#define CALL_STACK_FROM_CALL(call) ((grpc_call_stack*)((call) + 1)) -#define CALL_FROM_CALL_STACK(call_stack) (((grpc_call*)(call_stack)) - 1) +/* Given a size, round up to the next multiple of sizeof(void*) */ +#define ROUND_UP_TO_ALIGNMENT_SIZE(x) \ + (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u)) + +#define CALL_STACK_FROM_CALL(call) \ + (grpc_call_stack*)((char*)(call) + \ + ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call))) +#define CALL_FROM_CALL_STACK(call_stack) \ + (grpc_call*)(((char*)(call_stack)) - \ + ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call))) + #define CALL_ELEM_FROM_CALL(call, idx) \ grpc_call_stack_element(CALL_STACK_FROM_CALL(call), idx) #define CALL_FROM_TOP_ELEM(top_elem) \ @@ -342,8 +352,9 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args, size_t initial_size = grpc_channel_get_call_size_estimate(args->channel); GRPC_STATS_INC_CALL_INITIAL_SIZE(initial_size); gpr_arena* arena = gpr_arena_create(initial_size); - call = static_cast(gpr_arena_alloc( - arena, sizeof(grpc_call) + channel_stack->call_stack_size)); + call = static_cast( + gpr_arena_alloc(arena, ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) + + channel_stack->call_stack_size)); gpr_ref_init(&call->ext_ref, 1); call->arena = arena; grpc_call_combiner_init(&call->call_combiner); @@ -1209,7 +1220,6 @@ static void post_batch_completion(batch_control* bctl) { if (bctl->op.send_initial_metadata) { grpc_metadata_batch_destroy( - &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]); } if (bctl->op.send_message) { @@ -1217,14 +1227,9 @@ static void post_batch_completion(batch_control* bctl) { } if (bctl->op.send_trailing_metadata) { grpc_metadata_batch_destroy( - &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]); } if (bctl->op.recv_trailing_metadata) { - grpc_metadata_batch* md = - &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */]; - recv_trailing_filter(call, md); - /* propagate cancellation to any interested children */ gpr_atm_rel_store(&call->received_final_op_atm, 1); parent_call* pc = get_parent_call(call); @@ -1246,7 +1251,6 @@ static void post_batch_completion(batch_control* bctl) { } gpr_mu_unlock(&pc->child_list_mu); } - if (call->is_client) { get_final_status(call, set_status_value_directly, call->final_op.client.status, @@ -1256,7 +1260,6 @@ static void post_batch_completion(batch_control* bctl) { get_final_status(call, set_cancelled_value, call->final_op.server.cancelled, nullptr, nullptr); } - GRPC_ERROR_UNREF(error); error = GRPC_ERROR_NONE; } @@ -1538,6 +1541,19 @@ static void receiving_initial_metadata_ready(void* bctlp, grpc_error* error) { finish_batch_step(bctl); } +static void receiving_trailing_metadata_ready(void* bctlp, grpc_error* error) { + batch_control* bctl = static_cast(bctlp); + grpc_call* call = bctl->call; + 
+  GRPC_CALL_COMBINER_STOP(&call->call_combiner, "recv_trailing_metadata_ready");
+  add_batch_error(bctl, GRPC_ERROR_REF(error), false);
+  if (error == GRPC_ERROR_NONE) {
+    grpc_metadata_batch* md =
+        &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
+    recv_trailing_filter(call, md);
+  }
+  finish_batch_step(bctl);
+}
+
 static void finish_batch(void* bctlp, grpc_error* error) {
   batch_control* bctl = static_cast<batch_control*>(bctlp);
   grpc_call* call = bctl->call;
@@ -1558,7 +1574,8 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
   size_t i;
   const grpc_op* op;
   batch_control* bctl;
-  int num_completion_callbacks_needed = 1;
+  bool has_send_ops = false;
+  int num_recv_ops = 0;
   grpc_call_error error = GRPC_CALL_OK;
   grpc_transport_stream_op_batch* stream_op;
   grpc_transport_stream_op_batch_payload* stream_op_payload;
@@ -1664,6 +1681,7 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
           stream_op_payload->send_initial_metadata.peer_string =
               &call->peer_string;
         }
+        has_send_ops = true;
         break;
       }
       case GRPC_OP_SEND_MESSAGE: {
@@ -1693,6 +1711,7 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
             &op->data.send_message.send_message->data.raw.slice_buffer, flags);
         stream_op_payload->send_message.send_message.reset(
             call->sending_stream.get());
+        has_send_ops = true;
         break;
       }
       case GRPC_OP_SEND_CLOSE_FROM_CLIENT: {
@@ -1713,6 +1732,7 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
         call->sent_final_op = true;
         stream_op_payload->send_trailing_metadata.send_trailing_metadata =
             &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
+        has_send_ops = true;
         break;
       }
       case GRPC_OP_SEND_STATUS_FROM_SERVER: {
@@ -1777,6 +1797,7 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
         }
         stream_op_payload->send_trailing_metadata.send_trailing_metadata =
             &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
+        has_send_ops = true;
         break;
       }
       case GRPC_OP_RECV_INITIAL_METADATA: {
@@ -1804,7 +1825,7 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
           stream_op_payload->recv_initial_metadata.peer_string =
               &call->peer_string;
         }
-        num_completion_callbacks_needed++;
+        ++num_recv_ops;
         break;
       }
       case GRPC_OP_RECV_MESSAGE: {
@@ -1826,7 +1847,7 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
                           grpc_schedule_on_exec_ctx);
         stream_op_payload->recv_message.recv_message_ready =
             &call->receiving_stream_ready;
-        num_completion_callbacks_needed++;
+        ++num_recv_ops;
         break;
       }
       case GRPC_OP_RECV_STATUS_ON_CLIENT: {
@@ -1852,11 +1873,16 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
         call->final_op.client.error_string =
             op->data.recv_status_on_client.error_string;
         stream_op->recv_trailing_metadata = true;
-        stream_op->collect_stats = true;
         stream_op_payload->recv_trailing_metadata.recv_trailing_metadata =
             &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
-        stream_op_payload->collect_stats.collect_stats =
+        stream_op_payload->recv_trailing_metadata.collect_stats =
             &call->final_info.stats.transport_stream_stats;
+        GRPC_CLOSURE_INIT(&call->receiving_trailing_metadata_ready,
+                          receiving_trailing_metadata_ready, bctl,
+                          grpc_schedule_on_exec_ctx);
+        stream_op_payload->recv_trailing_metadata.recv_trailing_metadata_ready =
+            &call->receiving_trailing_metadata_ready;
+        ++num_recv_ops;
         break;
       }
       case GRPC_OP_RECV_CLOSE_ON_SERVER: {
@@ -1877,11 +1903,16 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
         call->final_op.server.cancelled =
             op->data.recv_close_on_server.cancelled;
         stream_op->recv_trailing_metadata = true;
-        stream_op->collect_stats = true;
         stream_op_payload->recv_trailing_metadata.recv_trailing_metadata =
             &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
-        stream_op_payload->collect_stats.collect_stats =
+        stream_op_payload->recv_trailing_metadata.collect_stats =
            &call->final_info.stats.transport_stream_stats;
+        GRPC_CLOSURE_INIT(&call->receiving_trailing_metadata_ready,
+                          receiving_trailing_metadata_ready, bctl,
+                          grpc_schedule_on_exec_ctx);
+        stream_op_payload->recv_trailing_metadata.recv_trailing_metadata_ready =
+            &call->receiving_trailing_metadata_ready;
+        ++num_recv_ops;
         break;
       }
     }
@@ -1891,13 +1922,15 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
   if (!is_notify_tag_closure) {
     GPR_ASSERT(grpc_cq_begin_op(call->cq, notify_tag));
   }
-  gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed);
+  gpr_ref_init(&bctl->steps_to_complete, (has_send_ops ? 1 : 0) + num_recv_ops);
 
-  GRPC_CLOSURE_INIT(&bctl->finish_batch, finish_batch, bctl,
-                    grpc_schedule_on_exec_ctx);
-  stream_op->on_complete = &bctl->finish_batch;
-  gpr_atm_rel_store(&call->any_ops_sent_atm, 1);
+  if (has_send_ops) {
+    GRPC_CLOSURE_INIT(&bctl->finish_batch, finish_batch, bctl,
+                      grpc_schedule_on_exec_ctx);
+    stream_op->on_complete = &bctl->finish_batch;
+  }
 
+  gpr_atm_rel_store(&call->any_ops_sent_atm, 1);
   execute_batch(call, stream_op, &bctl->start_batch);
 
 done:
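
Note on the CALL_STACK_FROM_CALL change in call.cc above: the call stack now starts at an offset past grpc_call rounded up to GPR_MAX_ALIGNMENT instead of at (call + 1). Below is a small, self-contained sketch of that arithmetic; the GPR_MAX_ALIGNMENT value of 16 and the struct sizes are assumptions for illustration (the real constant comes from gRPC's port_platform.h, not from this patch).

#include <assert.h>

/* Assumed value for illustration only. */
#define GPR_MAX_ALIGNMENT 16

/* Same rounding expression as the macro introduced in call.cc. */
#define ROUND_UP_TO_ALIGNMENT_SIZE(x) \
  (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u))

int main(void) {
  /* Hypothetical sizes, not the real sizeof(grpc_call). */
  assert(ROUND_UP_TO_ALIGNMENT_SIZE(1000u) == 1008u);
  assert(ROUND_UP_TO_ALIGNMENT_SIZE(1008u) == 1008u);
  /* The old layout placed the call stack at offset sizeof(grpc_call);
     the new one places it at the rounded offset, and the arena
     allocation in grpc_call_create() grows by the same rounding. */
  return 0;
}
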
diff --git a/src/core/lib/transport/transport.cc b/src/core/lib/transport/transport.cc
index 039d603394..cbdb77c844 100644
--- a/src/core/lib/transport/transport.cc
+++ b/src/core/lib/transport/transport.cc
@@ -212,21 +212,32 @@ void grpc_transport_stream_op_batch_finish_with_failure(
   if (batch->send_message) {
     batch->payload->send_message.send_message.reset();
   }
-  if (batch->recv_message) {
-    GRPC_CALL_COMBINER_START(
-        call_combiner, batch->payload->recv_message.recv_message_ready,
-        GRPC_ERROR_REF(error), "failing recv_message_ready");
+  if (batch->cancel_stream) {
+    GRPC_ERROR_UNREF(batch->payload->cancel_stream.cancel_error);
   }
+  // Construct a list of closures to execute.
+  grpc_core::CallCombinerClosureList closures;
   if (batch->recv_initial_metadata) {
-    GRPC_CALL_COMBINER_START(
-        call_combiner,
+    closures.Add(
         batch->payload->recv_initial_metadata.recv_initial_metadata_ready,
         GRPC_ERROR_REF(error), "failing recv_initial_metadata_ready");
   }
-  GRPC_CLOSURE_SCHED(batch->on_complete, error);
-  if (batch->cancel_stream) {
-    GRPC_ERROR_UNREF(batch->payload->cancel_stream.cancel_error);
+  if (batch->recv_message) {
+    closures.Add(batch->payload->recv_message.recv_message_ready,
+                 GRPC_ERROR_REF(error), "failing recv_message_ready");
+  }
+  if (batch->recv_trailing_metadata) {
+    closures.Add(
+        batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready,
+        GRPC_ERROR_REF(error), "failing recv_trailing_metadata_ready");
+  }
+  if (batch->on_complete != nullptr) {
+    closures.Add(batch->on_complete, GRPC_ERROR_REF(error),
+                 "failing on_complete");
   }
+  // Execute closures.
+  closures.RunClosures(call_combiner);
+  GRPC_ERROR_UNREF(error);
 }
 
 typedef struct {
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index b2e252d939..585b9dfae9 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -122,9 +122,15 @@ typedef struct grpc_transport_stream_op_batch_payload
 /* Transport stream op: a set of operations to perform on a transport
    against a single stream */
 typedef struct grpc_transport_stream_op_batch {
-  /** Should be enqueued when all requested operations (excluding recv_message
-      and recv_initial_metadata which have their own closures) in a given batch
-      have been completed. */
+  /** Should be scheduled when all of the non-recv operations in the batch
+      are complete.
+
+      The recv ops (recv_initial_metadata, recv_message, and
+      recv_trailing_metadata) each have their own callbacks. If a batch
+      contains both recv ops and non-recv ops, on_complete should be
+      scheduled as soon as the non-recv ops are complete, regardless of
+      whether or not the recv ops are complete. If a batch contains
+      only recv ops, on_complete can be null. */
   grpc_closure* on_complete;
 
   /** Values for the stream op (fields set are determined by flags above) */
@@ -149,9 +155,6 @@ typedef struct grpc_transport_stream_op_batch {
    */
   bool recv_trailing_metadata : 1;
 
-  /** Collect any stats into provided buffer, zero internal stat counters */
-  bool collect_stats : 1;
-
   /** Cancel this stream with the provided error */
   bool cancel_stream : 1;
 
@@ -219,11 +222,10 @@ struct grpc_transport_stream_op_batch_payload {
 
   struct {
     grpc_metadata_batch* recv_trailing_metadata;
-  } recv_trailing_metadata;
-
-  struct {
     grpc_transport_stream_stats* collect_stats;
-  } collect_stats;
+    /** Should be enqueued when initial metadata is ready to be processed. */
+    grpc_closure* recv_trailing_metadata_ready;
+  } recv_trailing_metadata;
 
   /** Forcefully close this stream.
       The HTTP2 semantics should be:
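
Note: to make the revised on_complete contract above concrete, here is a purely illustrative sketch of the completion accounting that call_start_batch() performs after this change; expected_completions() is a hypothetical helper, not a function in this patch.

// Mirrors: gpr_ref_init(&bctl->steps_to_complete,
//                       (has_send_ops ? 1 : 0) + num_recv_ops);
static int expected_completions(bool has_send_ops, int num_recv_ops) {
  return (has_send_ops ? 1 : 0) + num_recv_ops;
}

// A batch with SEND_INITIAL_METADATA + RECV_INITIAL_METADATA + RECV_MESSAGE
// waits for expected_completions(true, 2) == 3 callbacks: finish_batch for
// the send side plus one callback per recv op.
//
// A batch with only RECV_STATUS_ON_CLIENT waits for
// expected_completions(false, 1) == 1 callback
// (receiving_trailing_metadata_ready) and leaves stream_op->on_complete as
// nullptr, which is why the transport-side code in this patch now checks
// on_complete for null.
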
diff --git a/src/core/lib/transport/transport_op_string.cc b/src/core/lib/transport/transport_op_string.cc
index 25ab492f3a..8c7db642a5 100644
--- a/src/core/lib/transport/transport_op_string.cc
+++ b/src/core/lib/transport/transport_op_string.cc
@@ -120,13 +120,6 @@ char* grpc_transport_stream_op_batch_string(
     gpr_strvec_add(&b, tmp);
   }
 
-  if (op->collect_stats) {
-    gpr_strvec_add(&b, gpr_strdup(" "));
-    gpr_asprintf(&tmp, "COLLECT_STATS:%p",
-                 op->payload->collect_stats.collect_stats);
-    gpr_strvec_add(&b, tmp);
-  }
-
   out = gpr_strvec_flatten(&b, nullptr);
   gpr_strvec_destroy(&b);
 
--
cgit v1.2.3