path: root/tensorflow/core/distributed_runtime/worker.h
author    A. Unique TensorFlower <gardener@tensorflow.org>    2018-05-01 13:15:53 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>     2018-05-01 13:19:40 -0700
commit    9149558a639efe82baf1b5201feccf2411343a8a (patch)
tree      1a6d3648dc5c2c59a00ca37c0f72c4eee81cc378 /tensorflow/core/distributed_runtime/worker.h
parent    1a50cd4ca8c4fe1c1a9ea14f219fd98be8704a7d (diff)
Collective Ops Part 5
Distributed-mode implementations of DeviceResolverInterface and ParamResolverInterface. Extend Worker interface with new methods in support of these interfaces. This change is part of a series of changes introducing infrastructure for collective ops and initial implementations of reduction and broadcast.

PiperOrigin-RevId: 194984585
Diffstat (limited to 'tensorflow/core/distributed_runtime/worker.h')
-rw-r--r--  tensorflow/core/distributed_runtime/worker.h  17
1 file changed, 15 insertions(+), 2 deletions(-)
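As a point of reference for how callers typically drive the asynchronous Worker methods added in the diff below, here is a minimal sketch; it is not part of this commit. The helper name CompleteGroupBlocking is hypothetical, the WorkerInterface* is assumed to be obtained elsewhere, and the CompleteGroupRequest fields are assumed to be filled in by the caller, since their contents are not shown in this diff.

// A hedged sketch, not part of this commit: block until the worker has
// resolved the collective group described by `request`.
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/worker.pb.h"

namespace tensorflow {

Status CompleteGroupBlocking(WorkerInterface* worker,
                             const CompleteGroupRequest& request,
                             CompleteGroupResponse* response) {
  CallOptions opts;
  Notification done;
  Status status;
  // The StatusCallback fires on an arbitrary thread once group resolution
  // finishes (or fails); the Notification hands the result back to the caller.
  worker->CompleteGroupAsync(&opts, &request, response,
                             [&status, &done](const Status& s) {
                               status = s;
                               done.Notify();
                             });
  done.WaitForNotification();
  return status;
}

}  // namespace tensorflow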
diff --git a/tensorflow/core/distributed_runtime/worker.h b/tensorflow/core/distributed_runtime/worker.h
index 19aeeb752c..b5a9ada502 100644
--- a/tensorflow/core/distributed_runtime/worker.h
+++ b/tensorflow/core/distributed_runtime/worker.h
@@ -90,6 +90,20 @@ class Worker : public WorkerInterface {
   void TracingAsync(const TracingRequest* request, TracingResponse* response,
                     StatusCallback done) override;
 
+  void CompleteGroupAsync(CallOptions* opts,
+                          const CompleteGroupRequest* request,
+                          CompleteGroupResponse* response,
+                          StatusCallback done) override;
+
+  void CompleteInstanceAsync(CallOptions* opts,
+                             const CompleteInstanceRequest* request,
+                             CompleteInstanceResponse* response,
+                             StatusCallback done) override;
+
+  void GetStepSequenceAsync(const GetStepSequenceRequest* request,
+                            GetStepSequenceResponse* response,
+                            StatusCallback done) override;
+
  protected:
   WorkerEnv* const env_;  // Not owned.
@@ -101,8 +115,7 @@ class Worker : public WorkerInterface {
  private:
   PartialRunMgr partial_run_mgr_;
 
-  mutex mu_;
-  CancellationManager* cancellation_manager_ GUARDED_BY(mu_);
+  CancellationManager cancellation_manager_;
 
   Status PrepareRunGraph(RunGraphRequestWrapper* req,
                          GraphMgr::NamedTensors* in,