From 9070f24ae15a4f589219d4cb9c962b14612c2d8c Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Fri, 8 Jun 2018 18:12:16 -0700
Subject: Collective Ops Part 8

Enable collective op execution in distributed mode:

Pass collective_graph_key into graph building and step execution
contexts (MasterSession) where it triggers allocation of an
RpcCollectiveExecutorMgr that becomes accessible via the WorkerEnv and
MasterEnv.

The collective_graph_key is used to synchronize step_ids (which are
otherwise random) between otherwise independent graph executions that
contain collective ops that need to rendezvous.

All APIs for using collectives are still non-public and experimental.

PiperOrigin-RevId: 199879087
---
 .../collective_param_resolver_distributed_test.cc | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc b/tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc
index 95a010286d..4eed856759 100644
--- a/tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc
+++ b/tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc
@@ -147,10 +147,9 @@ class DeviceResDistTest : public ::testing::Test {
     ConfigProto config;
     for (int w = 0; w < num_workers; ++w) {
       string name = strings::StrCat("/job:worker/replica:0/task:", w);
-      // TODO(tucker): When config option becomes available, set here.
-      // if (w == 0) {
-      //   config.set_collective_group_leader(name);
-      // }
+      if (w == 0) {
+        config.mutable_experimental()->set_collective_group_leader(name);
+      }
       DefineWorker(config, name, device_type, num_devices);
     }
   }
--
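
For context, a minimal sketch (not part of this patch) of how client code
might set the new experimental option. ConfigProto and
set_collective_group_leader come from the diff above; the helper function
MakeLeaderConfig and the hard-coded device name are hypothetical.

#include "tensorflow/core/protobuf/config.pb.h"

// Hypothetical helper: builds a ConfigProto that designates task 0 of
// the worker job as the collective group leader, mirroring the test
// setup changed above. All workers in the cluster must agree on the
// same leader so collective parameter resolution can rendezvous there.
tensorflow::ConfigProto MakeLeaderConfig() {
  tensorflow::ConfigProto config;
  config.mutable_experimental()->set_collective_group_leader(
      "/job:worker/replica:0/task:0");
  return config;
}

In the test above, the same pattern appears inside the worker-definition
loop: only w == 0 gets its own name installed as the leader, and every
worker receives the identical config.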