about · summary · refs · log · tree · commit · diff · homepage
path: root/tensorflow/compiler/xla/service/layout_assignment.cc
diff options
context:
space:
mode:
author: A. Unique TensorFlower <gardener@tensorflow.org> 2017-12-18 15:14:59 -0800
committer: TensorFlower Gardener <gardener@tensorflow.org> 2017-12-18 15:18:59 -0800
commit: 7fd2c7a7f8650a128213b19b13cb6ced65e87696 (patch)
tree: 5261f1d8b4f1d1184d2bcff97aa66a2e250a85e2 /tensorflow/compiler/xla/service/layout_assignment.cc
parent: 2daf4aa01b3d1d837eaaaebcbe4527b521cca7a9 (diff)
[XLA] Add format field to layout
Format will describe the method used to store array data in memory. Currently only DENSE is supported, which represents the way XLA currently stores arrays. Scalars have a DENSE format. Tuples and opaque shapes use INVALID_FORMAT. Adds checks to code that uses minor_to_major to ensure the layout is dense. PiperOrigin-RevId: 179475450
Diffstat (limited to 'tensorflow/compiler/xla/service/layout_assignment.cc')
-rw-r--r--  tensorflow/compiler/xla/service/layout_assignment.cc  14
1 file changed, 8 insertions, 6 deletions
diff --git a/tensorflow/compiler/xla/service/layout_assignment.cc b/tensorflow/compiler/xla/service/layout_assignment.cc
index b598c765fc..641712fdfa 100644
--- a/tensorflow/compiler/xla/service/layout_assignment.cc
+++ b/tensorflow/compiler/xla/service/layout_assignment.cc
@@ -527,9 +527,11 @@ Status CheckCallLayout(HloInstruction* call,
Status CheckCustomCallLayout(HloInstruction* custom_call) {
for (const HloInstruction* operand : custom_call->operands()) {
TF_RET_CHECK(
+ ShapeUtil::IsOpaque(operand->shape()) ||
LayoutUtil::IsMonotonicWithDim0Major(operand->shape().layout()));
}
TF_RET_CHECK(
+ ShapeUtil::IsOpaque(custom_call->shape()) ||
LayoutUtil::IsMonotonicWithDim0Major(custom_call->shape().layout()));
return Status::OK();
}
@@ -708,8 +710,8 @@ std::unique_ptr<Layout> LayoutAssignment::ChooseOperandLayoutFromOutputLayout(
int64 operand_no) {
const HloInstruction* operand = instruction->operand(operand_no);
- CHECK(ShapeUtil::IsArray(instruction->shape()) &&
- ShapeUtil::IsArray(operand->shape()));
+ CHECK(ShapeUtil::IsArray(instruction->shape()));
+ CHECK(ShapeUtil::IsArray(operand->shape()));
if (instruction->IsElementwiseOnOperand(operand_no) &&
!ShapeUtil::IsScalar(operand->shape()) &&
@@ -739,7 +741,7 @@ std::unique_ptr<Layout> LayoutAssignment::ChooseOperandLayoutFromOutputLayout(
const Shape& output_shape = instruction->shape();
Shape output_shape_with_layout = ShapeUtil::MakeShapeWithLayout(
output_shape.element_type(), AsInt64Slice(output_shape.dimensions()),
- AsInt64Slice(output_layout.minor_to_major()));
+ LayoutUtil::MinorToMajor(output_layout));
Shape operand_shape = operand->shape();
*operand_shape.mutable_layout() =
LayoutUtil::GetDefaultLayoutForShape(operand_shape);
@@ -768,7 +770,7 @@ std::unique_ptr<Layout> LayoutAssignment::ChooseOperandLayoutFromOutputLayout(
int64 rank = ShapeUtil::Rank(instruction->shape());
std::vector<int64> new_minor_to_major(rank);
for (int64 i = 0; i < rank; ++i) {
- int64 output_dim = output_layout.minor_to_major(i);
+ int64 output_dim = LayoutUtil::Minor(output_layout, i);
int64 operand_dim = instruction->dimensions(output_dim);
new_minor_to_major[i] = operand_dim;
}
@@ -811,7 +813,7 @@ std::unique_ptr<Layout> LayoutAssignment::ChooseOutputLayoutFromOperandLayout(
Shape operand_shape_with_layout = ShapeUtil::MakeShapeWithLayout(
operand->shape().element_type(),
AsInt64Slice(operand->shape().dimensions()),
- AsInt64Slice(operand_layout.minor_to_major()));
+ LayoutUtil::MinorToMajor(operand_layout));
Shape output_shape = user->shape();
*output_shape.mutable_layout() =
LayoutUtil::GetDefaultLayoutForShape(output_shape);
@@ -841,7 +843,7 @@ std::unique_ptr<Layout> LayoutAssignment::ChooseOutputLayoutFromOperandLayout(
std::vector<int64> new_minor_to_major(rank);
auto inverse_dimensions = InversePermutation(user->dimensions());
for (int64 i = 0; i < rank; ++i) {
- int64 operand_dim = operand_layout.minor_to_major(i);
+ int64 operand_dim = LayoutUtil::Minor(operand_layout, i);
int64 user_dim = inverse_dimensions[operand_dim];
new_minor_to_major[i] = user_dim;
}