diff options
author:    HyoukJoong Lee <hyouklee@google.com>  2018-05-30 18:43:40 -0700
committer: TensorFlower Gardener <gardener@tensorflow.org>  2018-05-30 18:46:21 -0700
commit:    d0f9424e22eb438f3d846fa62feaf331797e62c4 (patch)
tree:      74c07877c136cc180503e85ad3e81dc51219552b /tensorflow
parent:    1479382c92d371843199ec6eb888b05609bf288f (diff)
Automated g4 rollback of changelist 195379693
PiperOrigin-RevId: 198654780
Diffstat (limited to 'tensorflow')
-rw-r--r--  tensorflow/compiler/xla/service/hlo_module_group_metadata.cc |  7
-rw-r--r--  tensorflow/compiler/xla/service/hlo_module_group_metadata.h  |  3
-rw-r--r--  tensorflow/compiler/xla/service/service.cc                   | 13
3 files changed, 20 insertions, 3 deletions
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc b/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
index 7d706b5fd0..f6fa45a6b7 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
+++ b/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
@@ -247,6 +247,13 @@ tensorflow::gtl::optional<int64> HloModuleGroupMetadata::GetInstructionDevice(
   return device;
 }
 
+int64 HloModuleGroupMetadata::GetDeviceModulesCount() const {
+  return std::count_if(modules_.begin(), modules_.end(),
+                       [](const HloModule* module) {
+                         return !module->config().is_host_module();
+                       });
+}
+
 Status HloModuleGroupMetadata::RecordInstructions() {
   const auto visitor = [this](HloInstruction* hlo) -> Status {
     if (hlo->opcode() == HloOpcode::kWhile) {
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_metadata.h b/tensorflow/compiler/xla/service/hlo_module_group_metadata.h
index 5f5bf27479..f68d4028dc 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_metadata.h
+++ b/tensorflow/compiler/xla/service/hlo_module_group_metadata.h
@@ -155,6 +155,9 @@ class HloModuleGroupMetadata {
   tensorflow::gtl::optional<int64> GetInstructionDevice(
       const HloInstruction& instruction) const;
 
+  // Returns the number of modules for devices (excluding the host module).
+  int64 GetDeviceModulesCount() const;
+
   // Returns the companion instructions for the given instruction.
   //
   // Precondition: IsCompanionWhile(instruction) is true.
diff --git a/tensorflow/compiler/xla/service/service.cc b/tensorflow/compiler/xla/service/service.cc
index cb0f76ebe4..5a813dcadc 100644
--- a/tensorflow/compiler/xla/service/service.cc
+++ b/tensorflow/compiler/xla/service/service.cc
@@ -624,9 +624,16 @@ Service::ExecuteParallelAndRegisterResult(
   // profiled.
   std::map<int64, se::Stream*> index_to_profiled_streams;
-  TF_ASSIGN_OR_RETURN(DeviceAssignment device_assignment,
-                      backend->computation_placer()->AssignDevices(
-                          options_.number_of_replicas(), executables.size()));
+  // Build DeviceAssignment for all cores based on the provided device handles.
+  DeviceAssignment device_assignment(options_.number_of_replicas(),
+                                     executables.size());
+  for (int64 i = 0; i < executables.size(); i++) {
+    TF_ASSIGN_OR_RETURN(auto replicas, Replicas(*backend, device_handles[i]));
+    CHECK_EQ(replicas.size(), arguments[i].size());
+    for (int64 replica = 0; replica < replicas.size(); ++replica) {
+      device_assignment(replica, i) = replicas[replica]->device_ordinal();
+    }
+  }
 
   for (int64 i = 0; i < executables.size(); i++) {
     // Stream executors for the replicas of the current computation.