| field     | value                                             | date                      |
|-----------|---------------------------------------------------|---------------------------|
| author    | Sree Kuchibhotla <sreek@google.com>               | 2017-07-14 15:33:27 -0700 |
| committer | Sree Kuchibhotla <sreek@google.com>               | 2017-07-14 15:33:27 -0700 |
| commit    | 1228237a3b5526d0f7eb8e6441902526dcaf6da4 (patch)  |                           |
| tree      | b0b69f7fb1f36e6bf4f4c3eefc0010f022d9ec3f          |                           |
| parent    | ce47215063da4103516a81b72721e4f698955b1b (diff)   |                           |
| parent    | ac51c2aa9b6d5cef7f3b4a498c7ae8a3d60960ad (diff)   |                           |
Merge branch 'master' into sreek-epoll1
24 files changed, 296 insertions, 153 deletions
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 73c3609aa0..e4e7c47c5a 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -2,36 +2,40 @@
 # Uses OWNERS files in different modules throughout the
 # repository as the source of truth for module ownership.
 /** @a11r @nicolasnoble @ctiller
-/binding.gyp @murgatroid99
-/Gemfile @murgatroid99
-/grpc.gemspec @murgatroid99
-/package.json @murgatroid99
-/Rakefile @murgatroid99
+/*.podspec @muxi @makdharma @a11r @nicolasnoble @ctiller
+/binding.gyp @murgatroid99 @a11r @nicolasnoble @ctiller
+/Gemfile @murgatroid99 @a11r @nicolasnoble @ctiller
+/grpc.gemspec @murgatroid99 @a11r @nicolasnoble @ctiller
+/package.json @murgatroid99 @a11r @nicolasnoble @ctiller
+/Rakefile @murgatroid99 @a11r @nicolasnoble @ctiller
+/grpc.bzl @muxi @makdharma @a11r @nicolasnoble @ctiller
 /bazel/** @nicolasnoble @dgquintas @ctiller
 /cmake/** @jtattermusch @a11r @nicolasnoble @ctiller
 /doc/PROTOCOL-HTTP2.md @ejona86 @a11r @nicolasnoble @ctiller
 /doc/interop-test-descriptions.md @ejona86 @a11r @nicolasnoble @ctiller
 /etc/** @jboeuf @nicolasnoble @a11r @ctiller
 /examples/node/** @murgatroid99 @a11r @nicolasnoble @ctiller
+/examples/objective-c/** @muxi @makdharma @a11r @nicolasnoble @ctiller
+/examples/python/** @nathanielmanistaatgoogle @kpayson64 @mehrdada
 /include/** @ctiller @markdroth @dgquintas @a11r @nicolasnoble
-/src/core/** @ctiller @markdroth @dgquintas @a11r @nicolasnoble
-/src/core/lib/iomgr/*_uv.c @murgatroid99 @ctiller @markdroth @dgquintas @a11r @nicolasnoble
-/src/core/lib/iomgr/*_uv.h @murgatroid99 @ctiller @markdroth @dgquintas @a11r @nicolasnoble
-/src/cpp/** @ctiller @markdroth @dgquintas @a11r @nicolasnoble
+/src/core/** @ctiller @markdroth @dgquintas
+/src/core/lib/iomgr/*_uv.c @murgatroid99 @ctiller @markdroth @dgquintas
+/src/core/lib/iomgr/*_uv.h @murgatroid99 @ctiller @markdroth @dgquintas
+/src/cpp/** @ctiller @markdroth @dgquintas
 /src/csharp/** @jtattermusch @apolcyn @a11r @nicolasnoble @ctiller
 /src/node/** @murgatroid99 @a11r @nicolasnoble @ctiller
 /src/objective-c/** @muxi @makdharma @a11r @nicolasnoble @ctiller
 /src/php/** @stanley-cheung @murgatroid99 @a11r @nicolasnoble @ctiller
-/src/python/** @nathanielmanistaatgoogle @kpayson64 @a11r @nicolasnoble @ctiller
+/src/python/** @nathanielmanistaatgoogle @kpayson64 @mehrdada
+/src/python/grpcio/grpc_core_dependencies.py @a11y @ctiller @nicolasnoble @nathanielmanistaatgoogle @kpayson64 @mehrdada
 /src/ruby/** @apolcyn @murgatroid99 @a11r @nicolasnoble @ctiller
 /test/build/** @ctiller @markdroth @dgquintas @a11r @nicolasnoble
-/test/core/** @ctiller @markdroth @dgquintas @a11r @nicolasnoble
-/test/cpp/** @ctiller @markdroth @dgquintas @a11r @nicolasnoble
-/test/cpp/qps/** @vjpai @ctiller @markdroth @dgquintas @a11r @nicolasnoble
+/test/core/** @ctiller @markdroth @dgquintas
+/test/cpp/** @ctiller @markdroth @dgquintas
+/test/cpp/qps/** @vjpai @ctiller @markdroth @dgquintas
 /test/distrib/node/** @murgatroid99 @a11r @nicolasnoble @ctiller
 /tools/** @matt-kwong @jtattermusch @nicolasnoble @a11r @ctiller
 /tools/codegen/core/** @ctiller @dgquintas @markdroth
-/tools/dockerfile/** @matt-kwong @jtattermusch @nicolasnoble @a11r @ctiller
-/tools/run_tests/** @matt-kwong @jtattermusch @nicolasnoble @a11r @ctiller
+/tools/distrib/python/** @nathanielmanistaatgoogle @kpayson64 @mehrdada
 /tools/run_tests/artifacts/*_node* @murgatroid99 @matt-kwong @jtattermusch @nicolasnoble @a11r @ctiller
 /tools/run_tests/helper_scripts/*_node* @murgatroid99 @matt-kwong @jtattermusch @nicolasnoble @a11r @ctiller
diff --git a/OWNERS b/OWNERS
@@ -5,3 +5,5 @@ @ctiller
 @murgatroid99 binding.gyp Gemfile grpc.gemspec package.json Rakefile
+@muxi *.podspec grpc.bzl
+@makdharma *.podspec grpc.bzl
diff --git a/examples/objective-c/OWNERS b/examples/objective-c/OWNERS
new file mode 100644
index 0000000000..c14fe53c06
--- /dev/null
+++ b/examples/objective-c/OWNERS
@@ -0,0 +1,2 @@
+@muxi
+@makdharma
diff --git a/examples/python/OWNERS b/examples/python/OWNERS
new file mode 100644
index 0000000000..ea3909e0a3
--- /dev/null
+++ b/examples/python/OWNERS
@@ -0,0 +1,5 @@
+set noparent
+
+@nathanielmanistaatgoogle
+@kpayson64
+@mehrdada
diff --git a/src/core/OWNERS b/src/core/OWNERS
index 8dca75ce91..986c9a1c3c 100644
--- a/src/core/OWNERS
+++ b/src/core/OWNERS
@@ -1,3 +1,4 @@
+set noparent
 @ctiller
 @markdroth
 @dgquintas
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
index 37580087a7..341763a4d7 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
@@ -142,6 +142,21 @@ struct rr_subchannel_list {
   bool shutting_down;
 };

+static rr_subchannel_list *rr_subchannel_list_create(round_robin_lb_policy *p,
+                                                     size_t num_subchannels) {
+  rr_subchannel_list *subchannel_list = gpr_zalloc(sizeof(*subchannel_list));
+  subchannel_list->policy = p;
+  subchannel_list->subchannels =
+      gpr_zalloc(sizeof(subchannel_data) * num_subchannels);
+  subchannel_list->num_subchannels = num_subchannels;
+  gpr_ref_init(&subchannel_list->refcount, 1);
+  if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+    gpr_log(GPR_INFO, "[RR %p] Created subchannel list %p for %lu subchannels",
+            (void *)p, (void *)subchannel_list, (unsigned long)num_subchannels);
+  }
+  return subchannel_list;
+}
+
 static void rr_subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
                                        rr_subchannel_list *subchannel_list) {
   GPR_ASSERT(subchannel_list->shutting_down);
@@ -171,9 +186,9 @@ static void rr_subchannel_list_ref(rr_subchannel_list *subchannel_list,
   gpr_ref_non_zero(&subchannel_list->refcount);
   if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
     const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
-    gpr_log(GPR_INFO, "[RR %p] subchannel_list %p REF %lu->%lu",
+    gpr_log(GPR_INFO, "[RR %p] subchannel_list %p REF %lu->%lu (%s)",
             (void *)subchannel_list->policy, (void *)subchannel_list,
-            (unsigned long)(count - 1), (unsigned long)count);
+            (unsigned long)(count - 1), (unsigned long)count, reason);
   }
 }

@@ -183,9 +198,9 @@ static void rr_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
   const bool done = gpr_unref(&subchannel_list->refcount);
   if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
     const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
-    gpr_log(GPR_INFO, "[RR %p] subchannel_list %p UNREF %lu->%lu",
+    gpr_log(GPR_INFO, "[RR %p] subchannel_list %p UNREF %lu->%lu (%s)",
             (void *)subchannel_list->policy, (void *)subchannel_list,
-            (unsigned long)(count + 1), (unsigned long)count);
+            (unsigned long)(count + 1), (unsigned long)count, reason);
   }
   if (done) {
     rr_subchannel_list_destroy(exec_ctx, subchannel_list);
@@ -194,19 +209,13 @@ static void rr_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
 /** Mark \a subchannel_list as discarded. Unsubscribes all its subchannels. The
  * watcher's callback will ultimately unref \a subchannel_list. */
-static void rr_subchannel_list_shutdown(grpc_exec_ctx *exec_ctx,
-                                        rr_subchannel_list *subchannel_list,
-                                        const char *reason) {
-  if (subchannel_list->shutting_down) {
-    if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
-      gpr_log(GPR_DEBUG, "Subchannel list %p already shutting down",
-              (void *)subchannel_list);
-    }
-    return;
-  };
+static void rr_subchannel_list_shutdown_and_unref(
+    grpc_exec_ctx *exec_ctx, rr_subchannel_list *subchannel_list,
+    const char *reason) {
+  GPR_ASSERT(!subchannel_list->shutting_down);
   if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
-    gpr_log(GPR_DEBUG, "Shutting down subchannel_list %p",
-            (void *)subchannel_list);
+    gpr_log(GPR_DEBUG, "[RR %p] Shutting down subchannel_list %p (%s)",
+            (void *)subchannel_list->policy, (void *)subchannel_list, reason);
   }
   GPR_ASSERT(!subchannel_list->shutting_down);
   subchannel_list->shutting_down = true;
@@ -214,10 +223,12 @@ static void rr_subchannel_list_shutdown(grpc_exec_ctx *exec_ctx,
     subchannel_data *sd = &subchannel_list->subchannels[i];
     if (sd->subchannel != NULL) {  // if subchannel isn't shutdown, unsubscribe.
       if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
-        gpr_log(GPR_DEBUG,
-                "Unsubscribing from subchannel %p as part of shutting down "
-                "subchannel_list %p",
-                (void *)sd->subchannel, (void *)subchannel_list);
+        gpr_log(
+            GPR_DEBUG,
+            "[RR %p] Unsubscribing from subchannel %p as part of shutting down "
+            "subchannel_list %p",
+            (void *)subchannel_list->policy, (void *)sd->subchannel,
+            (void *)subchannel_list);
       }
       grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
@@ -294,7 +305,8 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p,
 static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
   if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
-    gpr_log(GPR_DEBUG, "Destroying Round Robin policy at %p", (void *)pol);
+    gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
+            (void *)pol, (void *)pol);
   }
   grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
   gpr_free(p);
@@ -303,7 +315,8 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
 static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
   if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
-    gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
+    gpr_log(GPR_DEBUG, "[RR %p] Shutting down Round Robin policy at %p",
+            (void *)pol, (void *)pol);
   }
   p->shutdown = true;
   pending_pick *pp;
@@ -318,9 +331,18 @@ static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   grpc_connectivity_state_set(
       exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "rr_shutdown");
-  rr_subchannel_list_shutdown(exec_ctx, p->subchannel_list,
-                              "sl_shutdown_rr_shutdown");
+  const bool latest_is_current =
+      p->subchannel_list == p->latest_pending_subchannel_list;
+  rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+                                        "sl_shutdown_rr_shutdown");
   p->subchannel_list = NULL;
+  if (!latest_is_current && p->latest_pending_subchannel_list != NULL &&
+      !p->latest_pending_subchannel_list->shutting_down) {
+    rr_subchannel_list_shutdown_and_unref(exec_ctx,
+                                          p->latest_pending_subchannel_list,
+                                          "sl_shutdown_pending_rr_shutdown");
+    p->latest_pending_subchannel_list = NULL;
+  }
 }

 static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
@@ -376,8 +398,8 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
   p->started_picking = true;
   for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) {
     subchannel_data *sd = &p->subchannel_list->subchannels[i];
-    GRPC_LB_POLICY_WEAK_REF(&p->base, "rr_connectivity");
-    rr_subchannel_list_ref(sd->subchannel_list, "start_picking");
+    GRPC_LB_POLICY_WEAK_REF(&p->base, "start_picking_locked");
+    rr_subchannel_list_ref(sd->subchannel_list, "started_picking");
     grpc_subchannel_notify_on_state_change(
         exec_ctx, sd->subchannel, p->base.interested_parties,
         &sd->pending_connectivity_state_unsafe,
@@ -399,7 +421,7 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                           grpc_closure *on_complete) {
   round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
   if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
-    gpr_log(GPR_INFO, "Round Robin %p trying to pick", (void *)pol);
+    gpr_log(GPR_INFO, "[RR %p] Trying to pick", (void *)pol);
   }
   if (p->subchannel_list != NULL) {
     const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
@@ -415,8 +437,8 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
       if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
         gpr_log(
             GPR_DEBUG,
-            "[RR %p] PICKED TARGET <-- SUBCHANNEL %p (CONNECTED %p) (SL %p, "
-            "INDEX %lu)",
+            "[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
+            "index %lu)",
             (void *)p, (void *)sd->subchannel, (void *)*target,
             (void *)sd->subchannel_list, (unsigned long)next_ready_index);
       }
@@ -545,22 +567,27 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
   }
   // If the policy is shutting down, unref and return.
   if (p->shutdown) {
-    rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, "pol_shutdown");
+    rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
+                             "pol_shutdown+started_picking");
     GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pol_shutdown");
     return;
   }
   if (sd->subchannel_list->shutting_down && error == GRPC_ERROR_CANCELLED) {
     // the subchannel list associated with sd has been discarded. This callback
-    // corresponds to the unsubscription.
-    rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, "sl_shutdown");
-    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "sl_shutdown");
+    // corresponds to the unsubscription. The unrefs correspond to the picking
+    // ref (start_picking_locked or update_started_picking).
+    rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
+                             "sl_shutdown+started_picking");
+    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "sl_shutdown+picking");
     return;
   }
   // Dispose of outdated subchannel lists.
   if (sd->subchannel_list != p->subchannel_list &&
       sd->subchannel_list != p->latest_pending_subchannel_list) {
     // sd belongs to an outdated subchannel_list: get rid of it.
-    rr_subchannel_list_shutdown(exec_ctx, sd->subchannel_list, "sl_oudated");
+    rr_subchannel_list_shutdown_and_unref(exec_ctx, sd->subchannel_list,
+                                          "sl_outdated");
+    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "sl_outdated");
     return;
   }
   // Now that we're inside the combiner, copy the pending connectivity
@@ -583,7 +610,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
       sd->user_data = NULL;
     }
     if (new_policy_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
-      /* the policy is shutting down. Flush all the pending picks... */
+      // the policy is shutting down. Flush all the pending picks...
       pending_pick *pp;
       while ((pp = p->pending_picks)) {
         p->pending_picks = pp->next;
@@ -592,8 +619,9 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
         gpr_free(pp);
       }
     }
-    /* unref the "rr_connectivity" weak ref from start_picking */
-    rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, "sd_shutdown");
+    rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
+                             "sd_shutdown+started_picking");
+    // unref the "rr_connectivity_update" weak ref from start_picking.
     GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity_sd_shutdown");
   } else {  // sd not in SHUTDOWN
@@ -618,10 +646,10 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
       }
       if (p->subchannel_list != NULL) {
         // dispose of the current subchannel_list
-        rr_subchannel_list_shutdown(exec_ctx, p->subchannel_list,
-                                    "sl_shutdown_rr_update_connectivity");
+        rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+                                              "sl_phase_out_shutdown");
       }
-      p->subchannel_list = sd->subchannel_list;
+      p->subchannel_list = p->latest_pending_subchannel_list;
       p->latest_pending_subchannel_list = NULL;
     }
     /* at this point we know there's at least one suitable subchannel. Go
@@ -632,8 +660,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
     subchannel_data *selected =
         &p->subchannel_list->subchannels[next_ready_index];
     if (p->pending_picks != NULL) {
-      /* if the selected subchannel is going to be used for the pending
-       * picks, update the last picked pointer */
+      // if the selected subchannel is going to be used for the pending
+      // picks, update the last picked pointer
       update_last_ready_subchannel_index_locked(p, next_ready_index);
     }
     pending_pick *pp;
@@ -647,16 +675,17 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
       }
       if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
         gpr_log(GPR_DEBUG,
-                "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (INDEX %lu)",
-                (void *)selected->subchannel,
-                (unsigned long)next_ready_index);
+                "[RR %p] Fulfilling pending pick. Target <-- subchannel %p "
+                "(subchannel_list %p, index %lu)",
+                (void *)p, (void *)selected->subchannel,
+                (void *)p->subchannel_list, (unsigned long)next_ready_index);
       }
       GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
       gpr_free(pp);
     }
   }
-  /* renew notification: reuses the "rr_connectivity" weak ref on the policy
-   * as well as the sd->subchannel_list ref. */
+  /* renew notification: reuses the "rr_connectivity_update" weak ref on the
+   * policy as well as the sd->subchannel_list ref. */
   grpc_subchannel_notify_on_state_change(
       exec_ctx, sd->subchannel, p->base.interested_parties,
       &sd->pending_connectivity_state_unsafe,
@@ -714,8 +743,7 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
   } else {
     // otherwise, keep using the current subchannel list (ignore this update).
     gpr_log(GPR_ERROR,
-            "No valid LB addresses channel arg for Round Robin %p update, "
-            "ignoring.",
+            "[RR %p] No valid LB addresses channel arg for update, ignoring.",
             (void *)p);
   }
   return;
@@ -731,24 +759,27 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"), "rr_update_empty");
   if (p->subchannel_list != NULL) {
-    rr_subchannel_list_shutdown(exec_ctx, p->subchannel_list,
-                                "sl_shutdown_rr_update");
+    rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+                                          "sl_shutdown_empty_update");
     p->subchannel_list = NULL;
   }
   return;
 }
 size_t subchannel_index = 0;
-  rr_subchannel_list *subchannel_list = gpr_zalloc(sizeof(*subchannel_list));
-  subchannel_list->policy = p;
-  subchannel_list->subchannels =
-      gpr_zalloc(sizeof(subchannel_data) * num_addrs);
-  subchannel_list->num_subchannels = num_addrs;
-  gpr_ref_init(&subchannel_list->refcount, 1);
-  p->latest_pending_subchannel_list = subchannel_list;
-  if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
-    gpr_log(GPR_DEBUG, "Created subchannel list %p for %lu subchannels",
-            (void *)subchannel_list, (unsigned long)num_addrs);
+  rr_subchannel_list *subchannel_list = rr_subchannel_list_create(p, num_addrs);
+  if (p->latest_pending_subchannel_list != NULL && p->started_picking) {
+    if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+      gpr_log(GPR_DEBUG,
+              "[RR %p] Shutting down latest pending subchannel list %p, about "
+              "to be "
+              "replaced by newer latest %p",
+              (void *)p, (void *)p->latest_pending_subchannel_list,
+              (void *)subchannel_list);
+    }
+    rr_subchannel_list_shutdown_and_unref(
+        exec_ctx, p->latest_pending_subchannel_list, "sl_outdated_dont_smash");
   }
+  p->latest_pending_subchannel_list = subchannel_list;
   grpc_subchannel_args sc_args;
   /* We need to remove the LB addresses in order to be able to compare the
    * subchannel keys of subchannels from a different batch of addresses. */
@@ -772,11 +803,12 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
     if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
       char *address_uri =
           grpc_sockaddr_to_uri(&addresses->addresses[i].address);
-      gpr_log(GPR_DEBUG,
-              "index %lu: Created subchannel %p for address uri %s into "
-              "subchannel_list %p",
-              (unsigned long)subchannel_index, (void *)subchannel, address_uri,
-              (void *)subchannel_list);
+      gpr_log(
+          GPR_DEBUG,
+          "[RR %p] index %lu: Created subchannel %p for address uri %s into "
+          "subchannel_list %p",
+          (void *)p, (unsigned long)subchannel_index, (void *)subchannel,
+          address_uri, (void *)subchannel_list);
       gpr_free(address_uri);
     }
     grpc_channel_args_destroy(exec_ctx, new_args);
@@ -815,10 +847,11 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
     // The policy isn't picking yet. Save the update for later, disposing of
     // previous version if any.
     if (p->subchannel_list != NULL) {
-      rr_subchannel_list_shutdown(exec_ctx, p->subchannel_list,
-                                  "rr_update_before_started_picking");
+      rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+                                            "rr_update_before_started_picking");
     }
     p->subchannel_list = subchannel_list;
+    p->latest_pending_subchannel_list = NULL;
   }
 }

@@ -848,7 +881,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
   grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
                                "round_robin");
   if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
-    gpr_log(GPR_DEBUG, "Created Round Robin %p with %lu subchannels", (void *)p,
+    gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void *)p,
            (unsigned long)p->subchannel_list->num_subchannels);
   }
   return &p->base;
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
index 244b260dfa..9065e33613 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
@@ -236,12 +236,12 @@ static void on_srv_query_done_cb(void *arg, int status, int timeouts,
          srv_it = srv_it->next) {
       if (grpc_ipv6_loopback_available()) {
         grpc_ares_hostbyname_request *hr = create_hostbyname_request(
-            r, srv_it->host, srv_it->port, true /* is_balancer */);
+            r, srv_it->host, htons(srv_it->port), true /* is_balancer */);
         ares_gethostbyname(*channel, hr->host, AF_INET6, on_hostbyname_done_cb,
                            hr);
       }
       grpc_ares_hostbyname_request *hr = create_hostbyname_request(
-          r, srv_it->host, srv_it->port, true /* is_balancer */);
+          r, srv_it->host, htons(srv_it->port), true /* is_balancer */);
       ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb,
                          hr);
       grpc_ares_ev_driver_start(&exec_ctx, r->ev_driver);
diff --git a/src/core/ext/filters/client_channel/subchannel_index.c b/src/core/ext/filters/client_channel/subchannel_index.c
index e291ca9db9..a33ab950bf 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.c
+++ b/src/core/ext/filters/client_channel/subchannel_index.c
@@ -40,6 +40,8 @@ struct grpc_subchannel_key {
 GPR_TLS_DECL(subchannel_index_exec_ctx);

+static bool g_force_creation = false;
+
 static void enter_ctx(grpc_exec_ctx *exec_ctx) {
   GPR_ASSERT(gpr_tls_get(&subchannel_index_exec_ctx) == 0);
   gpr_tls_set(&subchannel_index_exec_ctx, (intptr_t)exec_ctx);
@@ -84,6 +86,7 @@ static grpc_subchannel_key *subchannel_key_copy(grpc_subchannel_key *k) {
 int grpc_subchannel_key_compare(const grpc_subchannel_key *a,
                                 const grpc_subchannel_key *b) {
+  if (g_force_creation) return false;
   int c = GPR_ICMP(a->args.filter_count, b->args.filter_count);
   if (c != 0) return c;
   if (a->args.filter_count > 0) {
@@ -250,3 +253,7 @@ void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
   leave_ctx(exec_ctx);
 }
+
+void grpc_subchannel_index_test_only_set_force_creation(bool force_creation) {
+  g_force_creation = force_creation;
+}
diff --git a/src/core/ext/filters/client_channel/subchannel_index.h b/src/core/ext/filters/client_channel/subchannel_index.h
index e303bfaa05..98d882a453 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.h
+++ b/src/core/ext/filters/client_channel/subchannel_index.h
@@ -59,4 +59,16 @@ void grpc_subchannel_index_init(void);
 /** Shutdown the subchannel index (global) */
 void grpc_subchannel_index_shutdown(void);

+/** \em TEST ONLY.
+ * If \a force_creation is true, all key comparisons will be false, resulting in
+ * new subchannels always being created. Otherwise, the keys will be compared as
+ * usual.
+ *
+ * This function is *not* threadsafe on purpose: it should *only* be used in
+ * test code.
+ *
+ * Tests using this function \em MUST run tests with and without \a
+ * force_creation set. */
+void grpc_subchannel_index_test_only_set_force_creation(bool force_creation);
+
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H */
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index a5ae04cb5b..cff77e641c 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -121,6 +121,10 @@ void grpc_set_event_engine_test_only(
   g_event_engine = ev_engine;
 }

+const grpc_event_engine_vtable *grpc_get_event_engine_test_only() {
+  return g_event_engine;
+}
+
 /* Call this only after calling grpc_event_engine_init() */
 const char *grpc_get_poll_strategy_name() { return g_poll_strategy_name; }
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 54c4f2ee11..0de7333843 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -153,7 +153,9 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
 typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int);
 extern grpc_poll_function_type grpc_poll_function;

-/* This should be used for testing purposes ONLY */
+/* WARNING: The following two functions should be used for testing purposes
+ * ONLY */
 void grpc_set_event_engine_test_only(const grpc_event_engine_vtable *);
+const grpc_event_engine_vtable *grpc_get_event_engine_test_only();

 #endif /* GRPC_CORE_LIB_IOMGR_EV_POSIX_H */
diff --git a/src/core/lib/iomgr/timer_manager.c b/src/core/lib/iomgr/timer_manager.c
index b9bea9a2ab..631f7935d9 100644
--- a/src/core/lib/iomgr/timer_manager.c
+++ b/src/core/lib/iomgr/timer_manager.c
@@ -84,7 +84,14 @@ static void start_timer_thread_and_unlock(void) {
   gpr_thd_options opt = gpr_thd_options_default();
   gpr_thd_options_set_joinable(&opt);
   completed_thread *ct = gpr_malloc(sizeof(*ct));
+  // The call to gpr_thd_new() has to be under the same lock used by
+  // gc_completed_threads(), particularly due to ct->t, which is written here
+  // (internally by gpr_thd_new) and read there. Otherwise it's possible for ct
+  // to leak through g_completed_threads and be freed in gc_completed_threads()
+  // before "&ct->t" is written to, causing a use-after-free.
+  gpr_mu_lock(&g_mu);
   gpr_thd_new(&ct->t, timer_thread, ct, &opt);
+  gpr_mu_unlock(&g_mu);
 }

 void grpc_timer_manager_tick() {
diff --git a/src/cpp/OWNERS b/src/cpp/OWNERS
index 8dca75ce91..986c9a1c3c 100644
--- a/src/cpp/OWNERS
+++ b/src/cpp/OWNERS
@@ -1,3 +1,4 @@
+set noparent
 @ctiller
 @markdroth
 @dgquintas
diff --git a/src/python/OWNERS b/src/python/OWNERS
index 73a707c67d..ea3909e0a3 100644
--- a/src/python/OWNERS
+++ b/src/python/OWNERS
@@ -1,3 +1,5 @@
+set noparent
+
 @nathanielmanistaatgoogle
 @kpayson64
-
+@mehrdada
diff --git a/src/python/grpcio/OWNERS b/src/python/grpcio/OWNERS
new file mode 100644
index 0000000000..78d54ec96b
--- /dev/null
+++ b/src/python/grpcio/OWNERS
@@ -0,0 +1,3 @@
+@a11y grpc_core_dependencies.py
+@ctiller grpc_core_dependencies.py
+@nicolasnoble grpc_core_dependencies.py
diff --git a/test/core/OWNERS b/test/core/OWNERS
index 8dca75ce91..986c9a1c3c 100644
--- a/test/core/OWNERS
+++ b/test/core/OWNERS
@@ -1,3 +1,4 @@
+set noparent
 @ctiller
 @markdroth
 @dgquintas
diff --git a/test/cpp/OWNERS b/test/cpp/OWNERS
index 8dca75ce91..986c9a1c3c 100644
--- a/test/cpp/OWNERS
+++ b/test/cpp/OWNERS
@@ -1,3 +1,4 @@
+set noparent
 @ctiller
 @markdroth
 @dgquintas
diff --git a/test/cpp/end2end/client_lb_end2end_test.cc b/test/cpp/end2end/client_lb_end2end_test.cc
index 6d3f5a9d46..e1160ecdc6 100644
--- a/test/cpp/end2end/client_lb_end2end_test.cc
+++ b/test/cpp/end2end/client_lb_end2end_test.cc
@@ -35,6 +35,7 @@ extern "C" {
 #include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
+#include "src/core/ext/filters/client_channel/subchannel_index.h"
 }

 #include "src/proto/grpc/testing/echo.grpc.pb.h"
@@ -331,10 +332,14 @@ TEST_F(ClientLbEnd2endTest, PickFirstManyUpdates) {
   for (size_t i = 0; i < servers_.size(); ++i) {
     ports.emplace_back(servers_[i]->port_);
   }
-  for (size_t i = 0; i < 1000; ++i) {
-    std::random_shuffle(ports.begin(), ports.end());
-    SetNextResolution(ports);
-    if (i % 10 == 0) SendRpc();
+  for (const bool force_creation : {true, false}) {
+    grpc_subchannel_index_test_only_set_force_creation(force_creation);
+    gpr_log(GPR_INFO, "Force subchannel creation: %d", force_creation);
+    for (size_t i = 0; i < 1000; ++i) {
+      std::random_shuffle(ports.begin(), ports.end());
+      SetNextResolution(ports);
+      if (i % 10 == 0) SendRpc();
+    }
   }
   // Check LB policy name for the channel.
   EXPECT_EQ("pick_first", channel_->GetLoadBalancingPolicyName());
diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc
index a8ac631fbd..86cce2d30d 100644
--- a/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/test/cpp/end2end/grpclb_end2end_test.cc
@@ -195,12 +195,13 @@ class BalancerServiceImpl : public BalancerService {
     for (const auto& response_and_delay : responses_and_delays) {
       {
         std::unique_lock<std::mutex> lock(mu_);
-        if (shutdown_) break;
+        if (shutdown_) goto done;
       }
       SendResponse(stream, response_and_delay.first, response_and_delay.second);
     }
     {
       std::unique_lock<std::mutex> lock(mu_);
+      if (shutdown_) goto done;
       serverlist_cond_.wait(lock);
     }

@@ -210,6 +211,9 @@ class BalancerServiceImpl : public BalancerService {
       gpr_log(GPR_INFO, "LB: recv client load report msg: '%s'",
              request.DebugString().c_str());
       GPR_ASSERT(request.has_client_stats());
+      // We need to acquire the lock here in order to prevent the notify_one
+      // below from firing before its corresponding wait is executed.
+      std::lock_guard<std::mutex> lock(mu_);
       client_stats_.num_calls_started +=
           request.client_stats().num_calls_started();
       client_stats_.num_calls_finished +=
@@ -225,10 +229,9 @@ class BalancerServiceImpl : public BalancerService {
               .num_calls_finished_with_client_failed_to_send();
       client_stats_.num_calls_finished_known_received +=
           request.client_stats().num_calls_finished_known_received();
-      std::lock_guard<std::mutex> lock(mu_);
       load_report_cond_.notify_one();
     }
-
+  done:
     gpr_log(GPR_INFO, "LB: done");
     return Status::OK;
   }
@@ -429,19 +432,24 @@ class GrpclbEnd2endTest : public ::testing::Test {
     explicit ServerThread(const grpc::string& type,
                           const grpc::string& server_host, T* service)
        : type_(type), service_(service) {
+      std::mutex mu;
+      // We need to acquire the lock here in order to prevent the notify_one
+      // by ServerThread::Start from firing before the wait below is hit.
+      std::unique_lock<std::mutex> lock(mu);
       port_ = grpc_pick_unused_port_or_die();
       gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_);
-      std::mutex mu;
       std::condition_variable cond;
       thread_.reset(new std::thread(
           std::bind(&ServerThread::Start, this, server_host, &mu, &cond)));
-      std::unique_lock<std::mutex> lock(mu);
       cond.wait(lock);
       gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
     }

     void Start(const grpc::string& server_host, std::mutex* mu,
               std::condition_variable* cond) {
+      // We need to acquire the lock here in order to prevent the notify_one
+      // below from firing before its corresponding wait is executed.
+      std::lock_guard<std::mutex> lock(*mu);
       std::ostringstream server_address;
       server_address << server_host << ":" << port_;
       ServerBuilder builder;
@@ -449,13 +457,12 @@ class GrpclbEnd2endTest : public ::testing::Test {
                               InsecureServerCredentials());
       builder.RegisterService(service_);
       server_ = builder.BuildAndStart();
-      std::lock_guard<std::mutex> lock(*mu);
       cond->notify_one();
     }

     void Shutdown() {
       gpr_log(GPR_INFO, "%s about to shutdown", type_.c_str());
-      server_->Shutdown();
+      server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0));
       thread_->join();
       gpr_log(GPR_INFO, "%s shutdown completed", type_.c_str());
     }
@@ -821,6 +828,7 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
   // Kill balancer 0
   gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
+  balancers_[0]->NotifyDoneWithServerlists();
   if (balancers_[0]->Shutdown()) balancer_servers_[0].Shutdown();
   gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");

diff --git a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
index 1e3830a556..f79db15a47 100644
--- a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
+++ b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
@@ -41,6 +41,7 @@ namespace testing {
 static void* g_tag = (void*)(intptr_t)10;  // Some random number
 static grpc_completion_queue* g_cq;
 static grpc_event_engine_vtable g_vtable;
+static const grpc_event_engine_vtable* g_old_vtable;

 static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
                              grpc_closure* closure) {
@@ -72,7 +73,7 @@ static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
                                 grpc_pollset_worker** worker, gpr_timespec now,
                                 gpr_timespec deadline) {
   if (gpr_time_cmp(deadline, gpr_time_0(GPR_CLOCK_MONOTONIC)) == 0) {
-    gpr_log(GPR_ERROR, "no-op");
+    gpr_log(GPR_DEBUG, "no-op");
     return GRPC_ERROR_NONE;
   }

@@ -98,7 +99,12 @@ static void init_engine_vtable() {
 static void setup() {
   grpc_init();
+
+  /* Override the event engine with our test event engine (g_vtable); but before
+   * that, save the current event engine in g_old_vtable. We will have to set
+   * g_old_vtable back before calling grpc_shutdown() */
   init_engine_vtable();
+  g_old_vtable = grpc_get_event_engine_test_only();
   grpc_set_event_engine_test_only(&g_vtable);

   g_cq = grpc_completion_queue_create_for_next(NULL);
@@ -115,6 +121,10 @@ static void teardown() {
   }

   grpc_completion_queue_destroy(g_cq);
+
+  /* Restore the old event engine before calling grpc_shutdown */
+  grpc_set_event_engine_test_only(g_old_vtable);
+
   grpc_shutdown();
 }

 /* A few notes about Multi-threaded benchmarks:
diff --git a/tools/distrib/python/OWNERS b/tools/distrib/python/OWNERS
new file mode 100644
index 0000000000..ea3909e0a3
--- /dev/null
+++ b/tools/distrib/python/OWNERS
@@ -0,0 +1,5 @@
+set noparent
+
+@nathanielmanistaatgoogle
+@kpayson64
+@mehrdada
diff --git a/tools/dockerfile/grpc_artifact_linux_x64/Dockerfile b/tools/dockerfile/grpc_artifact_linux_x64/Dockerfile
index c667655afb..0251b2b392 100644
--- a/tools/dockerfile/grpc_artifact_linux_x64/Dockerfile
+++ b/tools/dockerfile/grpc_artifact_linux_x64/Dockerfile
@@ -49,19 +49,7 @@ RUN apt-get update && apt-key update && apt-get install -y \
 # Install Node dependencies
 RUN touch .profile
 RUN curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.25.4/install.sh | bash
-RUN /bin/bash -l -c "nvm install 4 && npm install -g node-pre-gyp"
-
-##################
-# Python dependencies
-
-RUN apt-get update && apt-get install -y \
-    python-all-dev \
-    python3-all-dev \
-    python-pip
-
-RUN pip install pip --upgrade
-RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0
+RUN /bin/bash -l -c "nvm install 8 && npm install -g node-pre-gyp"

 ##################
diff --git a/tools/dockerfile/grpc_artifact_linux_x86/Dockerfile b/tools/dockerfile/grpc_artifact_linux_x86/Dockerfile
index 79f230186f..2d179c8c45 100644
--- a/tools/dockerfile/grpc_artifact_linux_x86/Dockerfile
+++ b/tools/dockerfile/grpc_artifact_linux_x86/Dockerfile
@@ -49,19 +49,7 @@ RUN apt-get update && apt-key update && apt-get install -y \
 # Install Node dependencies
 RUN touch .profile
 RUN curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.25.4/install.sh | bash
-RUN /bin/bash -l -c "nvm install 4 && npm install -g node-pre-gyp"
-
-##################
-# Python dependencies
-
-RUN apt-get update && apt-get install -y \
-    python-all-dev \
-    python3-all-dev \
-    python-pip
-
-RUN pip install pip --upgrade
-RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0
+RUN /bin/bash -l -c "nvm install 8 && npm install -g node-pre-gyp"

 ##################
diff --git a/tools/mkowners/mkowners.py b/tools/mkowners/mkowners.py
index 18afe3a2f0..2ccedfcfb8 100755
--- a/tools/mkowners/mkowners.py
+++ b/tools/mkowners/mkowners.py
@@ -126,30 +126,71 @@ owners_data = new_owners_data

 def full_dir(rules_dir, sub_path):
   return os.path.join(rules_dir, sub_path) if rules_dir != '.' else sub_path

-def glob_intersect(g1, g2):
-  if not g2:
-    return all(c == '*' for c in g1)
-  if not g1:
-    return all(c == '*' for c in g2)
-  c1, *t1 = g1
-  c2, *t2 = g2
-  if c1 == '*':
-    return glob_intersect(g1, t2) or glob_intersect(t1, g2)
-  if c2 == '*':
-    return glob_intersect(t1, g2) or glob_intersect(g1, t2)
-  return c1 == c2 and glob_intersect(t1, t2)
+# glob using git
+gg_cache = {}
+def git_glob(glob):
+  global gg_cache
+  if glob in gg_cache: return gg_cache[glob]
+  r = set(subprocess
+          .check_output(['git', 'ls-files', os.path.join(git_root, glob)])
+          .decode('utf-8')
+          .strip()
+          .splitlines())
+  gg_cache[glob] = r
+  return r
+
+def expand_directives(root, directives):
+  globs = collections.OrderedDict()
+  # build a table of glob --> owners
+  for directive in directives:
+    for glob in directive.globs or ['**']:
+      if glob not in globs:
+        globs[glob] = []
+      if directive.who not in globs[glob]:
+        globs[glob].append(directive.who)
+  # expand owners for intersecting globs
+  sorted_globs = sorted(globs.keys(),
+                        key=lambda g: len(git_glob(full_dir(root, g))),
+                        reverse=True)
+  out_globs = collections.OrderedDict()
+  for glob_add in sorted_globs:
+    who_add = globs[glob_add]
+    pre_items = [i for i in out_globs.items()]
+    out_globs[glob_add] = who_add.copy()
+    for glob_have, who_have in pre_items:
+      files_add = git_glob(full_dir(root, glob_add))
+      files_have = git_glob(full_dir(root, glob_have))
+      intersect = files_have.intersection(files_add)
+      if intersect:
+        for f in sorted(files_add): # sorted to ensure merge stability
+          if f not in intersect:
+            print("X", root, glob_add, glob_have)
+            out_globs[os.path.relpath(f, start=root)] = who_add
+        for who in who_have:
+          if who not in out_globs[glob_add]:
+            out_globs[glob_add].append(who)
+  return out_globs

 def add_parent_to_globs(parent, globs, globs_dir):
   if not parent: return
   for owners in owners_data:
     if owners.dir == parent:
-      for directive in owners.directives:
-        for dglob in directive.globs or ['**']:
-          for gglob, glob in globs.items():
-            if glob_intersect(full_dir(globs_dir, gglob),
-                              full_dir(owners.dir, dglob)):
-              if directive.who not in glob:
-                glob.append(directive.who)
+      owners_globs = expand_directives(owners.dir, owners.directives)
+      for oglob, oglob_who in owners_globs.items():
+        for gglob, gglob_who in globs.items():
+          files_parent = git_glob(full_dir(owners.dir, oglob))
+          files_child = git_glob(full_dir(globs_dir, gglob))
+          intersect = files_parent.intersection(files_child)
+          gglob_who_orig = gglob_who.copy()
+          if intersect:
+            for f in sorted(files_child): # sorted to ensure merge stability
+              if f not in intersect:
+                print("Y", full_dir(owners.dir, oglob), full_dir(globs_dir, gglob))
+                who = gglob_who_orig.copy()
+                globs[os.path.relpath(f, start=globs_dir)] = who
+            for who in oglob_who:
+              if who not in gglob_who:
+                gglob_who.append(who)
       add_parent_to_globs(owners.parent, globs, globs_dir)
       return
   assert(False)
@@ -160,19 +201,30 @@ with open(args.out, 'w') as out:
   out.write('# Auto-generated by the tools/mkowners/mkowners.py tool\n')
   out.write('# Uses OWNERS files in different modules throughout the\n')
   out.write('# repository as the source of truth for module ownership.\n')
+  written_globs = []
   while todo:
     head, *todo = todo
     if head.parent and not head.parent in done:
       todo.append(head)
       continue
-    globs = collections.OrderedDict()
-    for directive in head.directives:
-      for glob in directive.globs or ['**']:
-        if glob not in globs:
-          globs[glob] = []
-        globs[glob].append(directive.who)
+    globs = expand_directives(head.dir, head.directives)
     add_parent_to_globs(head.parent, globs, head.dir)
     for glob, owners in globs.items():
-      out.write('/%s %s\n' % (
-          full_dir(head.dir, glob), ' '.join(owners)))
+      skip = False
+      for glob1, owners1, dir1 in reversed(written_globs):
+        files = git_glob(full_dir(head.dir, glob))
+        files1 = git_glob(full_dir(dir1, glob1))
+        intersect = files.intersection(files1)
+        if files == intersect:
+          if sorted(owners) == sorted(owners1):
+            skip = True # nothing new in this rule
+          break
+        elif intersect:
+          # continuing would cause a semantic change since some files are
+          # affected differently by this rule and CODEOWNERS is order dependent
+          break
+      if not skip:
+        out.write('/%s %s\n' % (
+            full_dir(head.dir, glob), ' '.join(owners)))
+      written_globs.append((glob, owners, head.dir))
     done.add(head.dir)