author     A. Unique TensorFlower <gardener@tensorflow.org>  2018-08-08 03:28:34 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>   2018-08-08 03:32:42 -0700
commit     15f1fa14a3fd4b63e18539836f6036fef024fce7 (patch)
tree       c18d09d0b8c450d129a775b7804de855f2c82bb4 /tensorflow/compiler/xla/service/hlo_parser.cc
parent     de537122fbd1a49a44bd71e3a24c7b4d4d23c24c (diff)
Remove tile shape from HloSharding
The tile shape can be deduced from the tile assignment and the HLO shape; by not storing it in the sharding, we give the compiler more flexibility to decide the data layout.

PiperOrigin-RevId: 207860794
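To make the deduction concrete, here is a minimal sketch assuming simple ceiling-division tiling; DeduceTileShape is a hypothetical helper for illustration, not the actual XLA API.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Hypothetical sketch: recover the per-tile extents from the HLO shape's
    // dimensions and the tile assignment grid. Dimension i is split across
    // tile_assignment_dimensions[i] tiles, so each tile covers
    // ceil(hlo_dimensions[i] / tile_assignment_dimensions[i]) elements, and an
    // explicitly stored tile shape carries no extra information.
    std::vector<int64_t> DeduceTileShape(
        const std::vector<int64_t>& hlo_dimensions,
        const std::vector<int64_t>& tile_assignment_dimensions) {
      std::vector<int64_t> tile_dims;
      tile_dims.reserve(hlo_dimensions.size());
      for (size_t i = 0; i < hlo_dimensions.size(); ++i) {
        // Ceiling division; the last tile along a dimension may be padded.
        tile_dims.push_back(
            (hlo_dimensions[i] + tile_assignment_dimensions[i] - 1) /
            tile_assignment_dimensions[i]);
      }
      return tile_dims;
    }

    int main() {
      // Example: an f32[8,6] operand tiled over a [2,3] device grid
      // yields per-tile extents [4,2].
      const auto tile = DeduceTileShape({8, 6}, {2, 3});
      std::printf("%lld x %lld\n", static_cast<long long>(tile[0]),
                  static_cast<long long>(tile[1]));
    }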
Diffstat (limited to 'tensorflow/compiler/xla/service/hlo_parser.cc')
-rw-r--r--  tensorflow/compiler/xla/service/hlo_parser.cc | 15 ++-------------
1 file changed, 2 insertions(+), 13 deletions(-)
diff --git a/tensorflow/compiler/xla/service/hlo_parser.cc b/tensorflow/compiler/xla/service/hlo_parser.cc
index 93cc884e3a..de73b38dec 100644
--- a/tensorflow/compiler/xla/service/hlo_parser.cc
+++ b/tensorflow/compiler/xla/service/hlo_parser.cc
@@ -1383,7 +1383,6 @@ bool HloParser::ParseSingleSharding(OpSharding* sharding,
   bool replicated = false;
   std::vector<tensorflow::int64> devices;
   std::vector<tensorflow::int64> tile_assignment_dimensions;
-  Shape tile_shape;
   while (lexer_.GetKind() != TokKind::kRbrace) {
     switch (lexer_.GetKind()) {
       case TokKind::kw_maximal:
@@ -1434,7 +1433,8 @@ bool HloParser::ParseSingleSharding(OpSharding* sharding,
         break;
       }
       case TokKind::kShape:
-        tile_shape = lexer_.GetShapeVal();
+        // TODO(b/112302613): Left here for backward compatibility to ignore the
+        // removed tile shape data.
         lexer_.Lex();
         break;
       case TokKind::kRbrace:
@@ -1449,19 +1449,12 @@ bool HloParser::ParseSingleSharding(OpSharding* sharding,
       return Error(loc,
                    "replicated shardings should not have any devices assigned");
     }
-    if (!ShapeUtil::Equal(tile_shape, Shape())) {
-      return Error(loc,
-                   "replicated shardings should not have any tile shape set");
-    }
     sharding->set_type(OpSharding::Type::OpSharding_Type_REPLICATED);
   } else if (maximal) {
     if (devices.size() != 1) {
       return Error(loc,
                    "maximal shardings should have exactly one device assigned");
     }
-    if (!ShapeUtil::Equal(tile_shape, Shape())) {
-      return Error(loc, "maximal shardings should not have any tile shape set");
-    }
     sharding->set_type(OpSharding::Type::OpSharding_Type_MAXIMAL);
     sharding->add_tile_assignment_devices(devices[0]);
   } else {
@@ -1469,9 +1462,6 @@ bool HloParser::ParseSingleSharding(OpSharding* sharding,
       return Error(
           loc, "non-maximal shardings must have more than one device assigned");
     }
-    if (ShapeUtil::Equal(tile_shape, Shape())) {
-      return Error(loc, "non-maximal shardings should have a tile shape set");
-    }
     if (tile_assignment_dimensions.empty()) {
       return Error(
           loc,
@@ -1479,7 +1469,6 @@ bool HloParser::ParseSingleSharding(OpSharding* sharding,
           "dimensions");
     }
     sharding->set_type(OpSharding::Type::OpSharding_Type_OTHER);
-    *sharding->mutable_tile_shape() = tile_shape;
     for (tensorflow::int64 dim : tile_assignment_dimensions) {
       sharding->add_tile_assignment_dimensions(dim);
     }
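For reference, a hedged sketch of the textual sharding forms ParseSingleSharding accepts after this change (representative syntax; exact attribute forms may vary across versions):

    {replicated}              // OpSharding_Type_REPLICATED, no devices listed
    {maximal device=0}        // OpSharding_Type_MAXIMAL, exactly one device
    {devices=[2,2]0,1,2,3}    // OpSharding_Type_OTHER, 2x2 tile assignment

A leading shape token in an older annotation (e.g. f32[2,3] before devices=...) is still lexed but ignored, per the TODO in the kShape case above.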