diff options
author | Peter Hawkins <phawkins@google.com> | 2017-01-09 12:04:37 -0800 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2017-01-09 12:26:35 -0800 |
commit | 1e67c90e2caceeff82d09793d1ef5fa0300d219b (patch) | |
tree | 6567ea8b0fa01fcfcd608b7e4c636865d33c7032 /tensorflow/compiler/xla/index_util_test.cc | |
parent | 7ad7e4dfae4344d6b955b5eb61dc4b6bb792f1b3 (diff) |
Initial open-source release of XLA: Accelerated Linear Algebra.
XLA is a compiler-based linear algebra execution engine that targets CPUs, GPUs and custom accelerators.
XLA is still experimental; we are releasing it early to get the community involved.
Change: 143990941
Diffstat (limited to 'tensorflow/compiler/xla/index_util_test.cc')
-rw-r--r-- | tensorflow/compiler/xla/index_util_test.cc | 159 |
1 file changed, 159 insertions, 0 deletions
diff --git a/tensorflow/compiler/xla/index_util_test.cc b/tensorflow/compiler/xla/index_util_test.cc new file mode 100644 index 0000000000..85259b33f0 --- /dev/null +++ b/tensorflow/compiler/xla/index_util_test.cc @@ -0,0 +1,159 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/compiler/xla/index_util.h" + +#include <initializer_list> + +#include "tensorflow/compiler/xla/shape_util.h" +#include "tensorflow/compiler/xla/test_helpers.h" +#include "tensorflow/compiler/xla/xla_data.pb.h" +#include "tensorflow/core/platform/test.h" + +namespace xla { +namespace { + +void SetMinorToMajorLayout(Shape* shape, + std::initializer_list<int64> dimensions) { + shape->mutable_layout()->clear_minor_to_major(); + for (auto dimension : dimensions) { + shape->mutable_layout()->add_minor_to_major(dimension); + } +} + +TEST(IndexUtilTest, VectorIndexing) { + // Vectors are trivially laid out and the linear index should always be the + // same as the "multidimensional" index. 
+ Shape vector_shape = ShapeUtil::MakeShape(F32, {100}); + EXPECT_EQ(42, + IndexUtil::MultidimensionalIndexToLinearIndex(vector_shape, {42})); + std::vector<int64> multi_index = + IndexUtil::LinearIndexToMultidimensionalIndex(vector_shape, 42); + EXPECT_EQ(1, multi_index.size()); + EXPECT_EQ(42, multi_index[0]); +} + +TEST(IndexUtilTest, MatrixIndexingRowMajor) { + // Set layout to [0, 1]. That is, row major. + Shape matrix_shape_01 = ShapeUtil::MakeShape(F32, {10, 20}); + SetMinorToMajorLayout(&matrix_shape_01, {0, 1}); + + // If index is {a, b} then linear index should be: a + b * 10 + EXPECT_EQ(0, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_01, + {0, 0})); + EXPECT_EQ(199, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_01, + {9, 19})); + EXPECT_EQ(53, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_01, + {3, 5})); + EXPECT_EQ(std::vector<int64>({3, 5}), + IndexUtil::LinearIndexToMultidimensionalIndex(matrix_shape_01, 53)); +} + +TEST(IndexUtilTest, MatrixIndexingColumnMajor) { + // Set layout to [1, 0]. That is, column major. + Shape matrix_shape_10 = ShapeUtil::MakeShape(F32, {10, 20}); + SetMinorToMajorLayout(&matrix_shape_10, {1, 0}); + + // If index is {a, b} then linear index should be: a * 20 + b + EXPECT_EQ(0, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_10, + {0, 0})); + EXPECT_EQ(199, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_10, + {9, 19})); + EXPECT_EQ(65, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_10, + {3, 5})); + EXPECT_EQ(std::vector<int64>({3, 5}), + IndexUtil::LinearIndexToMultidimensionalIndex(matrix_shape_10, 65)); +} + +TEST(IndexUtilTest, ThreeDArrayIndexing210) { + // Set layout to [2, 1, 0]. That is, column major. 
+ Shape shape_210 = ShapeUtil::MakeShape(F32, {10, 20, 30}); + SetMinorToMajorLayout(&shape_210, {2, 1, 0}); + + // If index is {a, b, c} then linear index should be: + // a * 20 * 30 + b * 30 + c + EXPECT_EQ(1957, IndexUtil::MultidimensionalIndexToLinearIndex(shape_210, + {3, 5, 7})); + EXPECT_EQ(5277, IndexUtil::MultidimensionalIndexToLinearIndex(shape_210, + {8, 15, 27})); +} + +TEST(IndexUtilTest, ThreeDArrayIndexing120) { + // Set layout to [1, 2, 0] + Shape shape_120 = ShapeUtil::MakeShape(F32, {10, 20, 30}); + SetMinorToMajorLayout(&shape_120, {1, 2, 0}); + + // If index is {a, b, c} then linear index should be: + // a * 20 * 30 + b + c * 20 + EXPECT_EQ(1945, IndexUtil::MultidimensionalIndexToLinearIndex(shape_120, + {3, 5, 7})); + EXPECT_EQ(5355, IndexUtil::MultidimensionalIndexToLinearIndex(shape_120, + {8, 15, 27})); +} + +TEST(IndexUtilTest, FourDArrayIndexing3210) { + // Set layout to [3, 2, 1,0]. That is, column major. + Shape shape_3210 = ShapeUtil::MakeShape(F32, {10, 20, 30, 40}); + SetMinorToMajorLayout(&shape_3210, {3, 2, 1, 0}); + + // If index is {a, b, c, d} then linear index should be: + // a * 20 * 30 * 40 + b * 30 * 40 + c * 40 + d + EXPECT_EQ(78289, IndexUtil::MultidimensionalIndexToLinearIndex(shape_3210, + {3, 5, 7, 9})); + EXPECT_EQ(211113, IndexUtil::MultidimensionalIndexToLinearIndex( + shape_3210, {8, 15, 27, 33})); +} + +TEST(IndexUtilTest, LinearToMultiToLinear) { + // Verify that converting a linear index to a multidimensional index and back + // always returns the same value for different crazy shapes. Shape has + // 1440000000 elements. Inputs are randomly-ish selected. 
+ std::vector<int64> linear_indexes = {0, 1439999999, 1145567336, + 43883404, 617295214, 1117613654}; + + std::vector<std::initializer_list<int64>> minor_to_major_orders; + minor_to_major_orders.push_back({6, 5, 4, 3, 2, 1, 0}); + minor_to_major_orders.push_back({0, 1, 2, 3, 4, 5, 6}); + minor_to_major_orders.push_back({4, 5, 1, 2, 6, 0, 3}); + + for (auto minor_to_major_order : minor_to_major_orders) { + Shape shape = ShapeUtil::MakeShape(F32, {10, 20, 30, 40, 30, 20, 10}); + SetMinorToMajorLayout(&shape, minor_to_major_order); + for (auto linear_index : linear_indexes) { + std::vector<int64> multi_index = + IndexUtil::LinearIndexToMultidimensionalIndex(shape, linear_index); + EXPECT_EQ(linear_index, IndexUtil::MultidimensionalIndexToLinearIndex( + shape, multi_index)); + } + } +} + +TEST(IndexUtilTest, BumpIndices2x2) { + auto shape = ShapeUtil::MakeShape(S32, {2, 2}); + std::vector<int64> indices = {0, 0}; + EXPECT_TRUE(IndexUtil::BumpIndices(shape, &indices)); + EXPECT_MATCH(indices, + testing::VectorMatcher<int64>(std::vector<int64>{0, 1})); + EXPECT_TRUE(IndexUtil::BumpIndices(shape, &indices)); + EXPECT_MATCH(indices, + testing::VectorMatcher<int64>(std::vector<int64>{1, 0})); + EXPECT_TRUE(IndexUtil::BumpIndices(shape, &indices)); + EXPECT_MATCH(indices, + testing::VectorMatcher<int64>(std::vector<int64>{1, 1})); + EXPECT_FALSE(IndexUtil::BumpIndices(shape, &indices)); +} + +} // namespace +} // namespace xla |