author     A. Unique TensorFlower <gardener@tensorflow.org>  2018-06-11 14:10:47 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>   2018-06-11 14:15:48 -0700
commit     21aa82e1a12eb53fe4c94006f957c1adab9aa662 (patch)
tree       85688ae259c7df5d0e7546299041ec75e4895861
parent     0d9b4f06b7242288a3aeb0d29fe10278522c7f45 (diff)
[XLA] Sanitize HloComputation and HloInstruction names.
PiperOrigin-RevId: 200110003
-rw-r--r--  tensorflow/compiler/xla/service/buffer_assignment_test.cc  | 38
-rw-r--r--  tensorflow/compiler/xla/service/hlo_computation.cc          |  2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_graph_dumper_test.cc    |  2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instruction.cc          |  4
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instruction.h           |  9
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instruction_test.cc     |  4
-rw-r--r--  tensorflow/compiler/xla/service/hlo_module.cc               |  2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_parser.cc               |  7
-rw-r--r--  tensorflow/compiler/xla/service/transpose_folding_test.cc   |  2
9 files changed, 40 insertions(+), 30 deletions(-)
diff --git a/tensorflow/compiler/xla/service/buffer_assignment_test.cc b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
index 7e86c33687..96d25675de 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
@@ -371,11 +371,11 @@ TEST_F(BufferAssignmentTest, Basic) {
// param1[100] --------------/--------/
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
- builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, ""));
+ builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(1, f32vec100_, ""));
+ HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
- HloInstruction::CreateParameter(2, f32vec100_, ""));
+ HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, paramscalar, param0));
auto add = builder.AddInstruction(
@@ -418,11 +418,11 @@ TEST_F(BufferAssignmentTest, BasicUniquelyColored) {
// share anything.
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
- builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, ""));
+ builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(1, f32vec100_, ""));
+ HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
- HloInstruction::CreateParameter(2, f32vec100_, ""));
+ HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, paramscalar, param0));
auto add = builder.AddInstruction(
@@ -477,11 +477,11 @@ TEST_F(BufferAssignmentTest, BasicPartiallyColored) {
// have the color 0, which allows the mul and add to share buffers.
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
- builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, ""));
+ builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(1, f32vec100_, ""));
+ HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
- HloInstruction::CreateParameter(2, f32vec100_, ""));
+ HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, paramscalar, param0));
auto add = builder.AddInstruction(
@@ -547,11 +547,11 @@ TEST_F(BufferAssignmentTest, MultipleUsersForNode) {
//
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
- builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, ""));
+ builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(1, f32vec100_, ""));
+ HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
- HloInstruction::CreateParameter(2, f32vec100_, ""));
+ HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, paramscalar, param0));
auto add = builder.AddInstruction(
@@ -601,7 +601,7 @@ TEST_F(BufferAssignmentTest, TrivialMap) {
// Creates the main kernel and verifies instruction counts.
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(0, f32a100x10_, ""));
+ HloInstruction::CreateParameter(0, f32a100x10_, "p"));
auto map = builder.AddInstruction(
HloInstruction::CreateMap(f32a100x10_, {param0}, map_computation));
module->AddEntryComputation(builder.Build());
@@ -654,7 +654,7 @@ TEST_F(BufferAssignmentTest, CannotReuseInputBufferOfReduce) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(0, f32a100x10_, ""));
+ HloInstruction::CreateParameter(0, f32a100x10_, "p"));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32a100x10_, HloOpcode::kExp, param0));
auto exp2 = builder.AddInstruction(
@@ -818,7 +818,7 @@ TEST_F(BufferAssignmentTest, UnaryOpReuseChain) {
// param0[100] ---> (exp) ---> (tanh) ---> (exp) ---> (neg)
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(0, f32vec100_, ""));
+ HloInstruction::CreateParameter(0, f32vec100_, "p"));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kExp, param0));
auto tanh = builder.AddInstruction(
@@ -1496,11 +1496,11 @@ TEST_F(BufferAssignmentTest, TrivialPeakBuffers) {
// param1[100] --------------/--------/
auto builder = HloComputation::Builder(TestName());
auto paramscalar =
- builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, ""));
+ builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
auto param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(1, f32vec100_, ""));
+ HloInstruction::CreateParameter(1, f32vec100_, "p1"));
auto param1 = builder.AddInstruction(
- HloInstruction::CreateParameter(2, f32vec100_, ""));
+ HloInstruction::CreateParameter(2, f32vec100_, "p2"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec100_, HloOpcode::kMultiply, paramscalar, param0));
auto add = builder.AddInstruction(
@@ -1536,7 +1536,7 @@ TEST_F(BufferAssignmentTest, PeakBuffers) {
// be {%rev, %neg, %concat}. This occurs right at the concat itself.
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
- HloInstruction::CreateParameter(0, f32vec100_, ""));
+ HloInstruction::CreateParameter(0, f32vec100_, "p"));
auto log = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kLog, param));
auto rev = builder.AddInstruction(
diff --git a/tensorflow/compiler/xla/service/hlo_computation.cc b/tensorflow/compiler/xla/service/hlo_computation.cc
index ed0ea39ff5..763d9d2269 100644
--- a/tensorflow/compiler/xla/service/hlo_computation.cc
+++ b/tensorflow/compiler/xla/service/hlo_computation.cc
@@ -64,7 +64,7 @@ HloComputation::HloComputation(
const string& name, int parameter_count,
std::vector<std::unique_ptr<HloInstruction>>* instructions,
HloInstruction* root_instruction, HloInstruction* fusion_instruction)
- : name_(name),
+ : name_(NameUniquer::GetSanitizedName(name)),
unique_id_(-1),
root_instruction_(root_instruction),
fusion_instruction_(fusion_instruction) {
diff --git a/tensorflow/compiler/xla/service/hlo_graph_dumper_test.cc b/tensorflow/compiler/xla/service/hlo_graph_dumper_test.cc
index 8e52d926d8..68f41a1cbb 100644
--- a/tensorflow/compiler/xla/service/hlo_graph_dumper_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_graph_dumper_test.cc
@@ -121,7 +121,7 @@ TEST(HloGraphDumperTest, Constant) {
HloComputation::Builder b("b");
auto instruction = b.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateR0<float>(-42)));
- instruction->set_name("i_am_a_constant_root_instruction");
+ instruction->SetAndSanitizeName("i_am_a_constant_root_instruction");
HloModuleConfig config;
HloModule m(TestName(), config);
HloComputation* root_computation = m.AddEntryComputation(b.Build());
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc
index f0fec77c31..c89d836888 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction.cc
@@ -231,7 +231,7 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
}
TF_RET_CHECK(!proto.name().empty());
- instruction->name_ = proto.name();
+ instruction->SetAndSanitizeName(proto.name());
instruction->metadata_ = proto.metadata();
instruction->backend_config_ = proto.backend_config();
@@ -295,7 +295,7 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
auto instruction =
WrapUnique(new HloInstruction(HloOpcode::kParameter, shape));
instruction->parameter_number_ = parameter_number;
- instruction->name_ = name;
+ instruction->SetAndSanitizeName(name);
return instruction;
}
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.h b/tensorflow/compiler/xla/service/hlo_instruction.h
index 5c5def58d3..ae1c563b56 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.h
+++ b/tensorflow/compiler/xla/service/hlo_instruction.h
@@ -1364,9 +1364,14 @@ class HloInstruction {
std::tuple<bool, std::vector<int64>, std::vector<int64>>
ReshapeMerelyInsertsOrDeletes1SizedDimensions() const;
- // Gets/sets the string identifier for this instruction.
+ // Gets the string identifier for this instruction.
const string& name() const { return name_; }
- void set_name(tensorflow::StringPiece name) { name_ = std::string(name); }
+
+ // Sets the string identifier for this instruction. Name will be sanitized to
+ // match the regexp "[a-zA-Z_][a-zA-Z0-9_.-]*".
+ void SetAndSanitizeName(const string& name) {
+ name_ = NameUniquer::GetSanitizedName(name);
+ }
// Use the given NameUniquer to select a unique name for the instruction based
// on the instruction's existing name.
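
Note: the new header comment above pins the allowed name alphabet to the regexp
"[a-zA-Z_][a-zA-Z0-9_.-]*". As a rough, self-contained sketch (not the actual
NameUniquer::GetSanitizedName implementation), a sanitizer enforcing that pattern
could look like the following, assuming characters outside the allowed set are
simply replaced with '_':

    #include <cstddef>
    #include <string>

    // Illustrative only: coerce a name into "[a-zA-Z_][a-zA-Z0-9_.-]*".
    // The real NameUniquer::GetSanitizedName may treat empty names and
    // invalid characters differently.
    std::string SanitizeNameSketch(const std::string& name) {
      if (name.empty()) return "_";  // assumed fallback for empty names
      std::string out = name;
      for (std::size_t i = 0; i < out.size(); ++i) {
        const char c = out[i];
        const bool alpha = (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
        const bool digit = (c >= '0' && c <= '9');
        // The first character may only be a letter or '_'; later characters
        // may also be digits, '.', or '-'.
        const bool ok =
            alpha || c == '_' || (i > 0 && (digit || c == '.' || c == '-'));
        if (!ok) out[i] = '_';  // replace anything outside the allowed set
      }
      return out;
    }

Under this sketch a name such as "my:name" would become "my_name", and a name
beginning with a digit would have its first character replaced.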
diff --git a/tensorflow/compiler/xla/service/hlo_instruction_test.cc b/tensorflow/compiler/xla/service/hlo_instruction_test.cc
index 76349c4099..5d6f8b931f 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction_test.cc
@@ -342,7 +342,7 @@ TEST_F(HloInstructionTest, TrivialMap) {
// Builds a parameter and feeds it to the map.
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(0, f32a100x10, ""));
+ HloInstruction::CreateParameter(0, f32a100x10, "p"));
auto map = builder.AddInstruction(
HloInstruction::CreateMap(f32a100x10, {param0}, add_f32));
module->AddEntryComputation(builder.Build());
@@ -381,7 +381,7 @@ TEST_F(HloInstructionTest, TrivialReduce) {
// Builds a parameter and an initial value and feeds them to the reduce.
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
- HloInstruction::CreateParameter(0, f32a100x10, ""));
+ HloInstruction::CreateParameter(0, f32a100x10, "p"));
auto const0 = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)));
builder.AddInstruction(
diff --git a/tensorflow/compiler/xla/service/hlo_module.cc b/tensorflow/compiler/xla/service/hlo_module.cc
index ab60258677..9c59374b4a 100644
--- a/tensorflow/compiler/xla/service/hlo_module.cc
+++ b/tensorflow/compiler/xla/service/hlo_module.cc
@@ -390,7 +390,7 @@ HloInstruction* HloModule::OutlineExpressionFromComputation(
// as a parameter in the new function.
arguments.push_back(old_operand);
*operand_slot = builder.AddInstruction(HloInstruction::CreateParameter(
- parameter_count, old_operand->shape(), ""));
+ parameter_count, old_operand->shape(), "p"));
++parameter_count;
}
TF_CHECK_OK(
diff --git a/tensorflow/compiler/xla/service/hlo_parser.cc b/tensorflow/compiler/xla/service/hlo_parser.cc
index bf1c7b9323..4aa4406292 100644
--- a/tensorflow/compiler/xla/service/hlo_parser.cc
+++ b/tensorflow/compiler/xla/service/hlo_parser.cc
@@ -1148,7 +1148,12 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
HloOpcodeString(opcode)));
}
- instruction->set_name(name);
+ instruction->SetAndSanitizeName(name);
+ if (instruction->name() != name) {
+ return Error(name_loc,
+ StrCat("illegal instruction name: ", name,
+ "; suggest renaming to: ", instruction->name()));
+ }
// Add shared attributes like metadata to the instruction, if they were seen.
if (sharding) {
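
Note: with this hunk the HLO text parser no longer silently accepts arbitrary
instruction names. The name is first passed through SetAndSanitizeName, and if
sanitization altered it, parsing fails with an error of the form

    illegal instruction name: <original name>; suggest renaming to: <sanitized name>

so HLO text containing characters outside the allowed set must be renamed by the
author rather than being renamed implicitly.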
diff --git a/tensorflow/compiler/xla/service/transpose_folding_test.cc b/tensorflow/compiler/xla/service/transpose_folding_test.cc
index 3139801ea3..cccb8f2fbb 100644
--- a/tensorflow/compiler/xla/service/transpose_folding_test.cc
+++ b/tensorflow/compiler/xla/service/transpose_folding_test.cc
@@ -176,7 +176,7 @@ TEST_F(TransposeFoldingTest, FuseDotWithConstantOperands) {
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build(mul));
HloInstruction* call = module->OutlineExpressionFromComputation(
- {add, sub, mul}, "", entry_computation);
+ {add, sub, mul}, "entry", entry_computation);
EXPECT_EQ(call, entry_computation->root_instruction());
HloComputation* callee_computation = call->to_apply();
// The arguments to the call should be const1, const2, and const3.