about · summary · refs · log · tree · commit · diff · homepage
path: root/tensorflow/core/distributed_runtime
diff options
context:
space:
mode:
author: Derek Murray <mrry@google.com> 2018-06-05 08:20:41 -0700
committer: TensorFlower Gardener <gardener@tensorflow.org> 2018-06-05 08:23:35 -0700
commit 3653e80488f490ad744410a92ac287acf7035bda (patch)
tree 1c5eef40b85ed560a8e0de4870564a5e4582ea7c /tensorflow/core/distributed_runtime
parent 274f9510f68f237589df5c6a414e4b8e5ebcdba1 (diff)
Address compiler warnings in tensorflow/core/distributed_runtime.
PiperOrigin-RevId: 199299538
Diffstat (limited to 'tensorflow/core/distributed_runtime')
-rw-r--r--  tensorflow/core/distributed_runtime/local_master.h            2
-rw-r--r--  tensorflow/core/distributed_runtime/master.cc                 8
-rw-r--r--  tensorflow/core/distributed_runtime/master_session.cc         7
-rw-r--r--  tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc 4
4 files changed, 10 insertions, 11 deletions
diff --git a/tensorflow/core/distributed_runtime/local_master.h b/tensorflow/core/distributed_runtime/local_master.h
index cad6babad8..b9c76d0f1d 100644
--- a/tensorflow/core/distributed_runtime/local_master.h
+++ b/tensorflow/core/distributed_runtime/local_master.h
@@ -79,7 +79,7 @@ class LocalMaster : public MasterInterface {
RunCallableResponse* response) override;
Status ReleaseCallable(CallOptions* call_options,
const ReleaseCallableRequest* request,
- ReleaseCallableResponse* response);
+ ReleaseCallableResponse* response) override;
// Registers the mapping from the given `target` to the given `master`.
//
diff --git a/tensorflow/core/distributed_runtime/master.cc b/tensorflow/core/distributed_runtime/master.cc
index 4f9d84d158..a48f734d3e 100644
--- a/tensorflow/core/distributed_runtime/master.cc
+++ b/tensorflow/core/distributed_runtime/master.cc
@@ -473,7 +473,7 @@ void Master::PartialRunSetup(const PartialRunSetupRequest* req,
return;
}
- SchedClosure([this, session, req, resp, done]() {
+ SchedClosure([session, req, resp, done]() {
Status s = session->PartialRunSetup(req, resp);
session->Unref();
done(s);
@@ -628,7 +628,7 @@ void Master::MakeCallable(const MakeCallableRequest* req,
}
SchedClosure(std::bind(
- [this, session, req, resp](MyClosure done) {
+ [session, req, resp](MyClosure done) {
Status s = session->MakeCallable(*req, resp);
session->Unref();
done(s);
@@ -645,7 +645,7 @@ void Master::RunCallable(CallOptions* opts, const RunCallableRequest* req,
}
SchedClosure(std::bind(
- [this, session, opts, req, resp](MyClosure done) {
+ [session, opts, req, resp](MyClosure done) {
Status s = session->RunCallable(opts, *req, resp);
session->Unref();
done(s);
@@ -662,7 +662,7 @@ void Master::ReleaseCallable(const ReleaseCallableRequest* req,
}
SchedClosure(std::bind(
- [this, session, req, resp](MyClosure done) {
+ [session, req, resp](MyClosure done) {
Status s = session->ReleaseCallable(*req, resp);
session->Unref();
done(s);
diff --git a/tensorflow/core/distributed_runtime/master_session.cc b/tensorflow/core/distributed_runtime/master_session.cc
index bd70eca3f6..e29bb76ddf 100644
--- a/tensorflow/core/distributed_runtime/master_session.cc
+++ b/tensorflow/core/distributed_runtime/master_session.cc
@@ -156,8 +156,7 @@ class MasterSession::ReffedClientGraph : public core::RefCounted {
LoggingResponse* resp = new LoggingResponse;
p.worker->LoggingAsync(
&req, resp,
- [step_id, ss, resp, &scoped_mu, &waiting_for,
- &all_done](const Status& s) {
+ [step_id, ss, resp, &scoped_mu, &all_done](const Status& s) {
{
mutex_lock l(scoped_mu);
if (s.ok()) {
@@ -1207,7 +1206,7 @@ Status MasterSession::CreateWorkerSessions(
std::vector<WorkerGroup> workers(worker_names.size());
// Release the workers.
- auto cleanup = gtl::MakeCleanup([this, &workers, worker_cache] {
+ auto cleanup = gtl::MakeCleanup([&workers, worker_cache] {
for (auto&& worker_group : workers) {
if (worker_group.worker != nullptr) {
worker_cache->ReleaseWorker(*worker_group.name, worker_group.worker);
@@ -1289,7 +1288,7 @@ Status MasterSession::DeleteWorkerSessions() {
std::vector<WorkerGroup> workers(worker_names.size());
// Release the workers.
- auto cleanup = gtl::MakeCleanup([this, &workers, worker_cache] {
+ auto cleanup = gtl::MakeCleanup([&workers, worker_cache] {
for (auto&& worker_group : workers) {
if (worker_group.worker != nullptr) {
worker_cache->ReleaseWorker(*worker_group.name, worker_group.worker);
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc
index 2e7b111963..aa9304a033 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc
@@ -513,8 +513,8 @@ void GrpcWorker::RecvBufAsync(CallOptions* opts, const RecvBufRequest* request,
CollectiveRemoteAccess* rma = ce_handle.get()->remote_access();
rma->buf_rendezvous()->ConsumeBuf(
request->buf_rendezvous_key(),
- [this, opts, request, response, done](const Status& status,
- BufRendezvous::Hook* hook) {
+ [this, request, response, done](const Status& status,
+ BufRendezvous::Hook* hook) {
Status s = status;
if (s.ok()) {
if (!DMAHelper::CanUseDMA(hook->prod_value)) {