author     Yihua Zhang <yihuaz@google.com>    2018-12-06 09:06:33 -0800
committer  Yihua Zhang <yihuaz@google.com>    2018-12-06 09:06:33 -0800
commit     da1eae202c31a998610a31145f34fd5c49631712 (patch)
tree       a71bd382c67ed020f34ede87f32ecddc607850ad /src/core
parent     750e80ea1c12315ce987ed7b5c38595f698cf355 (diff)
parent     8ba8dadcc924296a120ce50329cd08fdfec6aec6 (diff)
Merge remote-tracking branch 'upstream/master' into gdc_metadata_server
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/lib/channel/channelz.cc          |  6
-rw-r--r--  src/core/lib/iomgr/call_combiner.cc       | 52
-rw-r--r--  src/core/lib/iomgr/call_combiner.h        | 31
-rw-r--r--  src/core/lib/iomgr/dynamic_annotations.h  | 67
4 files changed, 147 insertions, 9 deletions
diff --git a/src/core/lib/channel/channelz.cc b/src/core/lib/channel/channelz.cc
index 8d589f5983..0802143fbe 100644
--- a/src/core/lib/channel/channelz.cc
+++ b/src/core/lib/channel/channelz.cc
@@ -208,11 +208,7 @@ char* ServerNode::RenderServerSockets(intptr_t start_socket_id) {
grpc_json* json = top_level_json;
grpc_json* json_iterator = nullptr;
ChildRefsList socket_refs;
- // uuids index into entities one-off (idx 0 is really uuid 1, since 0 is
- // reserved). However, we want to support requests coming in with
- // start_server_id=0, which signifies "give me everything."
- size_t start_idx = start_socket_id == 0 ? 0 : start_socket_id - 1;
- grpc_server_populate_server_sockets(server_, &socket_refs, start_idx);
+ grpc_server_populate_server_sockets(server_, &socket_refs, start_socket_id);
if (!socket_refs.empty()) {
// create list of socket refs
grpc_json* array_parent = grpc_json_create_child(
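
Note on the hunk above: the deleted comment documents channelz's uuid-to-index convention (uuid N is stored at index N-1 because uuid 0 is reserved, and start_socket_id == 0 means "give me everything"); after this change the raw start_socket_id is passed through and the mapping is no longer applied at this call site. A minimal, hypothetical restatement of the removed mapping for reference only (start_idx_for is not a gRPC function):

#include <cstddef>
#include <cstdint>

// Hypothetical helper restating the mapping from the removed comment:
// uuid 0 is reserved, so uuid N lives at index N-1, and a request with
// start_socket_id == 0 means "start from the beginning".
inline size_t start_idx_for(intptr_t start_socket_id) {
  return start_socket_id == 0 ? 0 : static_cast<size_t>(start_socket_id) - 1;
}

int main() { return start_idx_for(0) == 0 && start_idx_for(5) == 4 ? 0 : 1; }
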
diff --git a/src/core/lib/iomgr/call_combiner.cc b/src/core/lib/iomgr/call_combiner.cc
index 90dda45ba3..6b5759a036 100644
--- a/src/core/lib/iomgr/call_combiner.cc
+++ b/src/core/lib/iomgr/call_combiner.cc
@@ -39,10 +39,57 @@ static gpr_atm encode_cancel_state_error(grpc_error* error) {
return static_cast<gpr_atm>(1) | (gpr_atm)error;
}
+#ifdef GRPC_TSAN_ENABLED
+static void tsan_closure(void* user_data, grpc_error* error) {
+ grpc_call_combiner* call_combiner =
+ static_cast<grpc_call_combiner*>(user_data);
+ // We ref-count the lock, and check if it's already taken.
+ // If it was taken, we should do nothing. Otherwise, we will mark it as
+ // locked. Note that if two different threads try to do this, only one of
+ // them will be able to mark the lock as acquired, while they both run their
+ // callbacks. In such cases (which should never happen for call_combiner),
+ // TSAN will correctly produce an error.
+ //
+ // TODO(soheil): This only covers the callbacks scheduled by
+ // grpc_call_combiner_(start|finish). If in the future, a
+ // callback gets scheduled using other mechanisms, we will need
+ // to add APIs to externally lock call combiners.
+ grpc_core::RefCountedPtr<grpc_call_combiner::TsanLock> lock =
+ call_combiner->tsan_lock;
+ bool prev = false;
+ if (lock->taken.compare_exchange_strong(prev, true)) {
+ TSAN_ANNOTATE_RWLOCK_ACQUIRED(&lock->taken, true);
+ } else {
+ lock.reset();
+ }
+ GRPC_CLOSURE_RUN(call_combiner->original_closure, GRPC_ERROR_REF(error));
+ if (lock != nullptr) {
+ TSAN_ANNOTATE_RWLOCK_RELEASED(&lock->taken, true);
+ bool prev = true;
+ GPR_ASSERT(lock->taken.compare_exchange_strong(prev, false));
+ }
+}
+#endif
+
+static void call_combiner_sched_closure(grpc_call_combiner* call_combiner,
+ grpc_closure* closure,
+ grpc_error* error) {
+#ifdef GRPC_TSAN_ENABLED
+ call_combiner->original_closure = closure;
+ GRPC_CLOSURE_SCHED(&call_combiner->tsan_closure, error);
+#else
+ GRPC_CLOSURE_SCHED(closure, error);
+#endif
+}
+
void grpc_call_combiner_init(grpc_call_combiner* call_combiner) {
gpr_atm_no_barrier_store(&call_combiner->cancel_state, 0);
gpr_atm_no_barrier_store(&call_combiner->size, 0);
gpr_mpscq_init(&call_combiner->queue);
+#ifdef GRPC_TSAN_ENABLED
+ GRPC_CLOSURE_INIT(&call_combiner->tsan_closure, tsan_closure, call_combiner,
+ grpc_schedule_on_exec_ctx);
+#endif
}
void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) {
@@ -87,7 +134,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
gpr_log(GPR_INFO, " EXECUTING IMMEDIATELY");
}
// Queue was empty, so execute this closure immediately.
- GRPC_CLOSURE_SCHED(closure, error);
+ call_combiner_sched_closure(call_combiner, closure, error);
} else {
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO, " QUEUING");
@@ -134,7 +181,8 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
gpr_log(GPR_INFO, " EXECUTING FROM QUEUE: closure=%p error=%s",
closure, grpc_error_string(closure->error_data.error));
}
- GRPC_CLOSURE_SCHED(closure, closure->error_data.error);
+ call_combiner_sched_closure(call_combiner, closure,
+ closure->error_data.error);
break;
}
} else if (grpc_call_combiner_trace.enabled()) {
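
For readers unfamiliar with the pattern used by tsan_closure above: the closure run is bracketed by a compare-exchange on a ref-counted flag plus TSAN rwlock annotations, so overlapping runs show up as a race. Below is a minimal standalone sketch of that pattern under stated assumptions: FakeTsanLock and run_under_fake_lock are illustrative names, std::shared_ptr stands in for grpc_core::RefCountedPtr, and the two macros are defined as no-ops so the sketch compiles outside a TSAN build.

#include <atomic>
#include <cstdio>
#include <functional>
#include <memory>

// No-op stand-ins for the annotations from dynamic_annotations.h.
#define TSAN_ANNOTATE_RWLOCK_ACQUIRED(addr, is_w) ((void)0)
#define TSAN_ANNOTATE_RWLOCK_RELEASED(addr, is_w) ((void)0)

struct FakeTsanLock {
  std::atomic<bool> taken{false};
};

// Runs `cb`; if this thread wins the compare-exchange it brackets the run
// with acquire/release annotations. If two threads get here concurrently,
// only one marks the lock while both run their callbacks, which is exactly
// the overlap TSAN would report as an error.
void run_under_fake_lock(const std::shared_ptr<FakeTsanLock>& lock_in,
                         const std::function<void()>& cb) {
  std::shared_ptr<FakeTsanLock> lock = lock_in;  // keep alive across cb
  bool expected = false;
  const bool acquired = lock->taken.compare_exchange_strong(expected, true);
  if (acquired) TSAN_ANNOTATE_RWLOCK_ACQUIRED(&lock->taken, true);
  cb();  // cb may destroy whatever owned the lock; our ref keeps it valid
  if (acquired) {
    TSAN_ANNOTATE_RWLOCK_RELEASED(&lock->taken, true);
    bool prev = true;
    lock->taken.compare_exchange_strong(prev, false);
  }
}

int main() {
  auto lock = std::make_shared<FakeTsanLock>();
  run_under_fake_lock(lock, [] { std::puts("closure ran under fake lock"); });
}
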
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
index c943fb1557..4ec0044f05 100644
--- a/src/core/lib/iomgr/call_combiner.h
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -27,7 +27,10 @@
#include "src/core/lib/gpr/mpscq.h"
#include "src/core/lib/gprpp/inlined_vector.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/dynamic_annotations.h"
// A simple, lock-free mechanism for serializing activity related to a
// single call. This is similar to a combiner but is more lightweight.
@@ -40,14 +43,38 @@
extern grpc_core::TraceFlag grpc_call_combiner_trace;
-typedef struct {
+struct grpc_call_combiner {
gpr_atm size = 0; // size_t, num closures in queue or currently executing
gpr_mpscq queue;
// Either 0 (if not cancelled and no cancellation closure set),
// a grpc_closure* (if the lowest bit is 0),
// or a grpc_error* (if the lowest bit is 1).
gpr_atm cancel_state = 0;
-} grpc_call_combiner;
+#ifdef GRPC_TSAN_ENABLED
+ // A fake ref-counted lock that is kept alive after the destruction of
+ // grpc_call_combiner, when we are running the original closure.
+ //
+ // Ideally we want to lock and unlock the call combiner as a pointer, when the
+ // callback is called. However, original_closure is free to trigger
+ // anything on the call combiner (including destruction of grpc_call).
+ // Thus, we need a ref-counted structure that can outlive the call combiner.
+ struct TsanLock
+ : public grpc_core::RefCounted<TsanLock,
+ grpc_core::NonPolymorphicRefCount> {
+ TsanLock() { TSAN_ANNOTATE_RWLOCK_CREATE(&taken); }
+ ~TsanLock() { TSAN_ANNOTATE_RWLOCK_DESTROY(&taken); }
+
+ // To avoid double-locking by the same thread, we should acquire/release
+ // the lock only when taken is false. On each acquire taken must be set to
+ // true.
+ std::atomic<bool> taken{false};
+ };
+ grpc_core::RefCountedPtr<TsanLock> tsan_lock =
+ grpc_core::MakeRefCounted<TsanLock>();
+ grpc_closure tsan_closure;
+ grpc_closure* original_closure;
+#endif
+};
// Assumes memory was initialized to zero.
void grpc_call_combiner_init(grpc_call_combiner* call_combiner);
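
The header comment above explains why TsanLock is ref-counted rather than embedded directly: the closure being run may destroy the enclosing grpc_call_combiner, so any state touched after the closure returns must be held by an independent reference. A standalone illustration of that lifetime hazard, assuming hypothetical names (Owner, SideState, run_closure) and std::shared_ptr in place of grpc_core::RefCountedPtr:

#include <cstdio>
#include <memory>

struct SideState {
  bool taken = false;  // stands in for TsanLock::taken
};

struct Owner {
  std::shared_ptr<SideState> side = std::make_shared<SideState>();
  ~Owner() { std::puts("Owner destroyed"); }
};

void run_closure(Owner* owner, void (*closure)(Owner*)) {
  // Take our own reference BEFORE running the closure; `owner` may be gone after.
  std::shared_ptr<SideState> side = owner->side;
  side->taken = true;
  closure(owner);       // may delete `owner`
  side->taken = false;  // safe: `side` keeps SideState alive past the owner
  std::puts("post-closure bookkeeping done");
}

int main() {
  Owner* owner = new Owner();
  run_closure(owner, [](Owner* o) { delete o; });  // closure destroys its owner
}
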
diff --git a/src/core/lib/iomgr/dynamic_annotations.h b/src/core/lib/iomgr/dynamic_annotations.h
new file mode 100644
index 0000000000..713928023a
--- /dev/null
+++ b/src/core/lib/iomgr/dynamic_annotations.h
@@ -0,0 +1,67 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_DYNAMIC_ANNOTATIONS_H
+#define GRPC_CORE_LIB_IOMGR_DYNAMIC_ANNOTATIONS_H
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GRPC_TSAN_ENABLED
+
+#define TSAN_ANNOTATE_HAPPENS_BEFORE(addr) \
+ AnnotateHappensBefore(__FILE__, __LINE__, (void*)(addr))
+#define TSAN_ANNOTATE_HAPPENS_AFTER(addr) \
+ AnnotateHappensAfter(__FILE__, __LINE__, (void*)(addr))
+#define TSAN_ANNOTATE_RWLOCK_CREATE(addr) \
+ AnnotateRWLockCreate(__FILE__, __LINE__, (void*)(addr))
+#define TSAN_ANNOTATE_RWLOCK_DESTROY(addr) \
+ AnnotateRWLockDestroy(__FILE__, __LINE__, (void*)(addr))
+#define TSAN_ANNOTATE_RWLOCK_ACQUIRED(addr, is_w) \
+ AnnotateRWLockAcquired(__FILE__, __LINE__, (void*)(addr), (is_w))
+#define TSAN_ANNOTATE_RWLOCK_RELEASED(addr, is_w) \
+ AnnotateRWLockReleased(__FILE__, __LINE__, (void*)(addr), (is_w))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+void AnnotateHappensBefore(const char* file, int line, const volatile void* cv);
+void AnnotateHappensAfter(const char* file, int line, const volatile void* cv);
+void AnnotateRWLockCreate(const char* file, int line,
+ const volatile void* lock);
+void AnnotateRWLockDestroy(const char* file, int line,
+ const volatile void* lock);
+void AnnotateRWLockAcquired(const char* file, int line,
+ const volatile void* lock, long is_w);
+void AnnotateRWLockReleased(const char* file, int line,
+ const volatile void* lock, long is_w);
+#ifdef __cplusplus
+}
+#endif
+
+#else /* GRPC_TSAN_ENABLED */
+
+#define TSAN_ANNOTATE_HAPPENS_BEFORE(addr)
+#define TSAN_ANNOTATE_HAPPENS_AFTER(addr)
+#define TSAN_ANNOTATE_RWLOCK_CREATE(addr)
+#define TSAN_ANNOTATE_RWLOCK_DESTROY(addr)
+#define TSAN_ANNOTATE_RWLOCK_ACQUIRED(addr, is_w)
+#define TSAN_ANNOTATE_RWLOCK_RELEASED(addr, is_w)
+
+#endif /* GRPC_TSAN_ENABLED */
+
+#endif /* GRPC_CORE_LIB_IOMGR_DYNAMIC_ANNOTATIONS_H */
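
As a usage sketch for the new header: the macros above let custom synchronization primitives be described to TSAN as rwlocks (and compile away otherwise). The example below annotates a hand-rolled spin lock; TinySpinLock is a hypothetical type, not part of this patch, and the include assumes the gRPC source tree is on the include path.

#include <atomic>

#include "src/core/lib/iomgr/dynamic_annotations.h"

class TinySpinLock {
 public:
  TinySpinLock() { TSAN_ANNOTATE_RWLOCK_CREATE(&locked_); }
  ~TinySpinLock() { TSAN_ANNOTATE_RWLOCK_DESTROY(&locked_); }

  void Lock() {
    bool expected = false;
    // Spin until we flip locked_ from false to true.
    while (!locked_.compare_exchange_weak(expected, true,
                                          std::memory_order_acquire)) {
      expected = false;
    }
    // Tell TSAN we now hold this as a write lock.
    TSAN_ANNOTATE_RWLOCK_ACQUIRED(&locked_, /*is_w=*/1);
  }

  void Unlock() {
    TSAN_ANNOTATE_RWLOCK_RELEASED(&locked_, /*is_w=*/1);
    locked_.store(false, std::memory_order_release);
  }

 private:
  std::atomic<bool> locked_{false};
};

int main() {
  TinySpinLock lock;
  lock.Lock();
  lock.Unlock();
}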