author    A. Unique TensorFlower <gardener@tensorflow.org>  2017-12-18 15:14:59 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>   2017-12-18 15:18:59 -0800
commit    7fd2c7a7f8650a128213b19b13cb6ced65e87696 (patch)
tree      5261f1d8b4f1d1184d2bcff97aa66a2e250a85e2 /tensorflow/compiler/xla/index_util.cc
parent    2daf4aa01b3d1d837eaaaebcbe4527b521cca7a9 (diff)
[XLA] Add format field to layout
Format will describe the method used to store array data in memory. Currently only DENSE is supported, which represents the way XLA currently stores arrays. Scalars have a DENSE format. Tuples and opaque shapes use INVALID_FORMAT.

Adds checks to code that uses minor_to_major to ensure the layout is dense.

PiperOrigin-RevId: 179475450
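To make the commit message concrete, here is a minimal self-contained sketch of the format idea it describes. All names below (Format, Layout, MinorToMajor) are illustrative stand-ins, not the actual XLA proto or LayoutUtil definitions; only the DENSE/INVALID_FORMAT split and the dense-only minor_to_major check come from the commit message.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative stand-ins for the new format field (names are assumptions).
enum class Format {
  kInvalidFormat,  // Used by tuples and opaque shapes.
  kDense,          // The only supported array format; scalars are also dense.
};

struct Layout {
  Format format = Format::kInvalidFormat;
  std::vector<int64_t> minor_to_major;  // Meaningful only for dense layouts.
};

// The kind of check this change adds: consumers of minor_to_major go through
// an accessor that first verifies the layout is dense, which is the role the
// LayoutUtil::MinorToMajor(shape) calls play in the diff below.
const std::vector<int64_t>& MinorToMajor(const Layout& layout) {
  assert(layout.format == Format::kDense);
  return layout.minor_to_major;
}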
Diffstat (limited to 'tensorflow/compiler/xla/index_util.cc')
-rw-r--r-- tensorflow/compiler/xla/index_util.cc | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/tensorflow/compiler/xla/index_util.cc b/tensorflow/compiler/xla/index_util.cc
index 76c0168f37..2ee23927d8 100644
--- a/tensorflow/compiler/xla/index_util.cc
+++ b/tensorflow/compiler/xla/index_util.cc
@@ -78,7 +78,7 @@ namespace xla {
int64 scale = 1;
int64 linear_index = 0;
bool first = true;
- for (auto dimension : shape.layout().minor_to_major()) {
+ for (auto dimension : LayoutUtil::MinorToMajor(shape)) {
if (first) {
// Avoid two multiplies on the first loop iteration
linear_index = multi_index[dimension];
@@ -110,7 +110,7 @@ namespace xla {
// Accumulated product D{L(0)} * D{L(1)} * ...
int64 divisor = 1;
- for (auto dimension : shape.layout().minor_to_major()) {
+ for (auto dimension : LayoutUtil::MinorToMajor(shape)) {
multi_index[dimension] =
(linear_index / divisor) % shape.dimensions(dimension);
divisor *= shape.dimensions(dimension);
@@ -133,18 +133,17 @@ namespace xla {
/* static */ int64 IndexUtil::GetDimensionStride(const Shape& shape,
int64 dimension) {
- const Layout& layout = shape.layout();
- int64 pdim_size = layout.padded_dimensions_size();
+ int64 pdim_size = LayoutUtil::PaddedDimensions(shape).size();
int64 stride = 1;
DCHECK(pdim_size == 0 || pdim_size == shape.dimensions_size());
- for (auto dim : layout.minor_to_major()) {
+ for (auto dim : LayoutUtil::MinorToMajor(shape)) {
if (dim == dimension) {
break;
}
if (pdim_size == 0) {
stride *= shape.dimensions(dim);
} else {
- stride *= layout.padded_dimensions(dim);
+ stride *= LayoutUtil::PaddedDimension(shape, dim);
}
}
return stride;
}
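For readers tracing the indexing math in these hunks: the first loop computes linear_index = sum_i multi_index[L(i)] * prod_{j<i} dims[L(j)], where L is the minor-to-major order, and GetDimensionStride returns the product term for a single dimension. Below is a standalone sketch of both computations in plain standard C++, without the XLA Shape/Layout types or padded-dimension support; it mirrors the loops above, but every name here is illustrative.

#include <cstdint>
#include <iostream>
#include <vector>

// Linear index of multi_index in an array with the given dimension sizes,
// walking dimensions from most minor to most major, as the diff's loop does.
int64_t LinearIndex(const std::vector<int64_t>& dims,
                    const std::vector<int64_t>& minor_to_major,
                    const std::vector<int64_t>& multi_index) {
  int64_t scale = 1;         // Product of the sizes of all more-minor dims.
  int64_t linear_index = 0;
  for (int64_t dimension : minor_to_major) {
    linear_index += scale * multi_index[dimension];
    scale *= dims[dimension];
  }
  return linear_index;
}

// Stride of `dimension`: the product of the sizes of every dimension that is
// more minor than it (the unpadded case of GetDimensionStride above).
int64_t DimensionStride(const std::vector<int64_t>& dims,
                        const std::vector<int64_t>& minor_to_major,
                        int64_t dimension) {
  int64_t stride = 1;
  for (int64_t dim : minor_to_major) {
    if (dim == dimension) break;
    stride *= dims[dim];
  }
  return stride;
}

int main() {
  // A 2x3 row-major array: dimension 1 is most minor.
  const std::vector<int64_t> dims = {2, 3};
  const std::vector<int64_t> minor_to_major = {1, 0};
  std::cout << LinearIndex(dims, minor_to_major, {1, 2}) << "\n";  // 1*3 + 2 = 5
  std::cout << DimensionStride(dims, minor_to_major, 0) << "\n";   // 3
}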