aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc
diff options
context:
space:
mode:
authorGravatar A. Unique TensorFlower <gardener@tensorflow.org>2017-11-13 11:37:37 -0800
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2017-11-13 11:45:43 -0800
commit3db96abfc5432c190d3afa62ebfad3c1d82cd818 (patch)
treea5926f485e408ad968a77f1e540dbcdeff2f23bb /tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc
parent58f7858601b72aa3c5854571f2152b91d1795e29 (diff)
Allow assigning colors based on HLO sharding information, when generating Graphviz HLO graphs via a new --xla_hlo_graph_sharding_color option.
When generating TF graphs, a new --xla_hlo_tfgraph_device_scopes option allows to prefix the instructions names with a device scope. This helps the TF graph viewer to better isolate the parts of the graph which are targeted to different devices, and allows rendering of graphs which would otherwise not be renderable due to their size. Changed TF/XLA broadcast lowering to propagate the request metadata into the HLO broadcast instructions. PiperOrigin-RevId: 175563052
Diffstat (limited to 'tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc')
-rw-r--r--tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc29
1 file changed, 23 insertions, 6 deletions
diff --git a/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc b/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc
index 06abe00747..101a710d1c 100644
--- a/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc
+++ b/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc
@@ -58,8 +58,6 @@ TensorShapeProto GetTensorShape(const HloInstruction* instruction) {
string GetDeviceName(int device) { return StrCat("/device/XLA:", device); }
-} // namespace
-
void CleanNodeName(string* name) {
name->erase(std::remove(name->begin(), name->end(), '%'), name->end());
const string chars_to_replace = "<>[]";
@@ -70,6 +68,11 @@ void CleanNodeName(string* name) {
std::replace_if(name->begin(), name->end(), pred, '_');
}
+} // namespace
+
+HloTfGraphBuilder::HloTfGraphBuilder(const DebugOptions& debug_options)
+ : debug_options_(debug_options) {}
+
Status HloTfGraphBuilder::AddComputation(const HloComputation& computation) {
VLOG(2) << "Adding computation " << computation.name();
for (auto embedded : computation.MakeEmbeddedComputationsList()) {
@@ -90,24 +93,38 @@ const string& HloTfGraphBuilder::GetNodeNameForInstruction(
if (ContainsKey(instruction_to_node_name_, instruction)) {
return instruction_to_node_name_[instruction];
}
+ auto append = [](string* str, const string& other) {
+ if (str->empty()) {
+ *str = other;
+ } else if (!other.empty()) {
+ StrAppend(str, "/", other);
+ }
+ };
string node_name;
+ if (debug_options_.xla_hlo_tfgraph_device_scopes() &&
+ instruction->has_sharding() &&
+ instruction->sharding().HasUniqueDevice()) {
+ node_name = StrCat(
+ "dev", instruction->sharding().UniqueDevice().ConsumeValueOrDie());
+ }
// If an instruction is fused, put it in the subgraph of the fusion;
// otherwise, put it in the computation subgraph.
const HloComputation* computation = instruction->parent();
if (computation->IsFusionComputation()) {
- node_name = GetNodeNameForInstruction(computation->FusionInstruction());
+ append(&node_name,
+ GetNodeNameForInstruction(computation->FusionInstruction()));
} else {
- node_name = computation->name();
+ append(&node_name, computation->name());
if (!instruction->metadata().op_name().empty()) {
// Always make computations contain TF ops but not the other way around.
- StrAppend(&node_name, "/", instruction->metadata().op_name());
+ append(&node_name, instruction->metadata().op_name());
}
}
string instruction_name = instruction->name();
if (instruction->opcode() == HloOpcode::kParameter) {
StrAppend(&instruction_name, ".", instruction->parameter_number());
}
- StrAppend(&node_name, "/", instruction_name);
+ append(&node_name, instruction_name);
CleanNodeName(&node_name);
auto ret =
instruction_to_node_name_.insert(std::make_pair(instruction, node_name));