author     A. Unique TensorFlower <gardener@tensorflow.org>  2017-04-27 06:36:29 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>   2017-04-27 07:47:50 -0700
commit  c83bb86589a6862570e3c68c401c90ab223e2080 (patch)
tree    24ee33d189f4a4793668d1e5b535d1e8530727cb
parent  79789dd5abe59ec525bc3bdffec82b6af8dbd0d8 (diff)
Replaced user-defined matchers with gmock matchers.
Change: 154420642
-rw-r--r-- tensorflow/compiler/xla/service/BUILD                              |  1
-rw-r--r-- tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc  |  9
-rw-r--r-- tensorflow/compiler/xla/service/hlo_cse_test.cc                    | 75
-rw-r--r-- tensorflow/compiler/xla/service/layout_assignment_test.cc          | 14
-rw-r--r-- tensorflow/compiler/xla/shape_util_test.cc                         | 31
-rw-r--r-- tensorflow/compiler/xla/tests/prng_test.cc                         |  5
6 files changed, 75 insertions, 60 deletions
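For context, a minimal standalone sketch (not taken from this patch) of the gmock matcher style the change adopts: EXPECT_THAT with ElementsAre / ElementsAreArray replaces the project-specific EXPECT_TRUE(ContainersEqual(...)) checks, so a mismatch reports the contents of both containers instead of a bare boolean failure. The test name and vector values below are illustrative assumptions; the patch additionally uses XLA's HLO opcode matchers (namespace op = xla::testing::opcode_matchers) for structural checks such as op::Add and op::Tuple, which this sketch does not reproduce.

    #include <vector>

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    using ::testing::ElementsAre;
    using ::testing::ElementsAreArray;

    // Illustrative test only; not part of the TensorFlow tree.
    TEST(MatcherStyleSketch, ElementsAreReportsContainerContents) {
      std::vector<int> dims = {1, 2, 3, 0};

      // Old style in these files:
      //   EXPECT_TRUE(ContainersEqual(dims, {1, 2, 3, 0}));
      // A failure only says the expression evaluated to false.

      // New style: a failure prints the expected and actual elements.
      EXPECT_THAT(dims, ElementsAre(1, 2, 3, 0));

      // ElementsAreArray matches against another container of expected values.
      const std::vector<int> expected = {1, 2, 3, 0};
      EXPECT_THAT(dims, ElementsAreArray(expected));
    }
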
diff --git a/tensorflow/compiler/xla/service/BUILD b/tensorflow/compiler/xla/service/BUILD
index aed3d72440..e17205be23 100644
--- a/tensorflow/compiler/xla/service/BUILD
+++ b/tensorflow/compiler/xla/service/BUILD
@@ -1386,6 +1386,7 @@ cc_test(
":cpu_plugin",
":hlo",
":hlo_cse",
+ ":hlo_matchers",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
diff --git a/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc b/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc
index f717d57839..b42702dbe1 100644
--- a/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
+#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/compiler/xla/util.h"
@@ -28,6 +29,8 @@ limitations under the License.
namespace xla {
namespace cpu {
+using ::testing::ElementsAre;
+
class ConvCanonicalizationTest : public HloTestBase {
public:
ConvCanonicalizationTest() {
@@ -96,14 +99,14 @@ TEST_F(ConvCanonicalizationTest, NonCanonicalToCanonical) {
// The input is in CNHW order. input_reshape should produce
// NHWC for the convolution to hit the Eigen fast path.
- EXPECT_TRUE(ContainersEqual(input_reshape->dimensions(), {1, 2, 3, 0}));
+ EXPECT_THAT(input_reshape->dimensions(), ElementsAre(1, 2, 3, 0));
// The kernel is in OIHW order. kernel_reshape should produce
// HWIO for the convolution to hit the Eigen fast path.
- EXPECT_TRUE(ContainersEqual(kernel_reshape->dimensions(), {2, 3, 1, 0}));
+ EXPECT_THAT(kernel_reshape->dimensions(), ElementsAre(2, 3, 1, 0));
// The output of the canonical convolution is in NHWC order (the same as
// input_reshape's order). output_reshape should restore that order to the
// order of the computation root (CNHW).
- EXPECT_TRUE(ContainersEqual(output_reshape->dimensions(), {3, 0, 1, 2}));
+ EXPECT_THAT(output_reshape->dimensions(), ElementsAre(3, 0, 1, 2));
}
TEST_F(ConvCanonicalizationTest, CanonicalStaysTheSame) {
diff --git a/tensorflow/compiler/xla/service/hlo_cse_test.cc b/tensorflow/compiler/xla/service/hlo_cse_test.cc
index ec8161f55f..9444382b52 100644
--- a/tensorflow/compiler/xla/service/hlo_cse_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_cse_test.cc
@@ -25,6 +25,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
+#include "tensorflow/compiler/xla/service/hlo_matchers.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -36,6 +37,8 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/platform/types.h"
+namespace op = xla::testing::opcode_matchers;
+
namespace xla {
namespace {
@@ -88,13 +91,15 @@ TEST_F(HloCseTest, CombineTwoConstantsDifferentLayoutsAndInsensitive) {
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, computation->instruction_count());
- EXPECT_NE(add->operand(0), add->operand(1));
+ EXPECT_THAT(add, op::Add(constant1, constant2));
HloCSE cse(/*is_layout_sensitive=*/false);
EXPECT_TRUE(cse.Run(module.get()).ValueOrDie());
EXPECT_EQ(2, computation->instruction_count());
- EXPECT_EQ(add->operand(0), add->operand(1));
+ auto first_operand = add->operand(0);
+ EXPECT_THAT(first_operand, ::testing::AnyOf(constant1, constant2));
+ EXPECT_THAT(add, op::Add(first_operand, first_operand));
auto result = ExecuteAndTransfer(std::move(module), {});
auto expected = LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}});
@@ -118,15 +123,13 @@ TEST_F(HloCseTest, CombineTwoConstantsDifferentLayoutsAndSensitive) {
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, computation->instruction_count());
- EXPECT_EQ(constant1, add->operand(0));
- EXPECT_EQ(constant2, add->operand(1));
+ EXPECT_THAT(add, op::Add(constant1, constant2));
HloCSE cse(/*is_layout_sensitive=*/true);
EXPECT_FALSE(cse.Run(module.get()).ValueOrDie());
EXPECT_EQ(3, computation->instruction_count());
- EXPECT_EQ(constant1, add->operand(0));
- EXPECT_EQ(constant2, add->operand(1));
+ EXPECT_THAT(add, op::Add(constant1, constant2));
auto result = ExecuteAndTransfer(std::move(module), {});
auto expected = LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}});
@@ -185,16 +188,18 @@ TEST_F(HloCseTest, NonscalarConstants) {
auto computation = module.AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
+ EXPECT_THAT(tuple,
+ op::Tuple(common_constant1, common_constant2, uncommon_constant));
HloCSE cse(/*is_layout_sensitive=*/false);
EXPECT_TRUE(cse.Run(&module).ValueOrDie());
EXPECT_EQ(3, computation->instruction_count());
-
- EXPECT_EQ(tuple->operand(0), tuple->operand(1));
- EXPECT_EQ(uncommon_constant, tuple->operand(2));
- EXPECT_TRUE(tuple->operand(0) == common_constant1 ||
- tuple->operand(0) == common_constant2);
+ auto first_operand = tuple->operand(0);
+ EXPECT_THAT(first_operand,
+ ::testing::AnyOf(common_constant1, common_constant2));
+ EXPECT_THAT(tuple,
+ op::Tuple(first_operand, first_operand, uncommon_constant));
}
TEST_F(HloCseTest, IdenticalInstructions) {
@@ -215,16 +220,15 @@ TEST_F(HloCseTest, IdenticalInstructions) {
auto computation = module.AddEntryComputation(builder.Build());
EXPECT_EQ(5, computation->instruction_count());
- EXPECT_NE(tuple->operand(0), tuple->operand(1));
- EXPECT_NE(tuple->operand(1), tuple->operand(2));
- EXPECT_NE(tuple->operand(0), tuple->operand(2));
+ EXPECT_THAT(tuple, op::Tuple(exp1, exp2, exp3));
HloCSE cse(/*is_layout_sensitive=*/false);
EXPECT_TRUE(cse.Run(&module).ValueOrDie());
EXPECT_EQ(3, computation->instruction_count());
- EXPECT_EQ(tuple->operand(0), tuple->operand(1));
- EXPECT_EQ(tuple->operand(1), tuple->operand(2));
+ auto first_operand = tuple->operand(0);
+ EXPECT_THAT(first_operand, ::testing::AnyOf(exp1, exp2, exp3));
+ EXPECT_THAT(tuple, op::Tuple(first_operand, first_operand, first_operand));
}
TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsSensitive) {
@@ -249,13 +253,13 @@ TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsSensitive) {
auto computation = module.AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
- EXPECT_NE(tuple->operand(0), tuple->operand(1));
+ EXPECT_THAT(tuple, op::Tuple(exp1, exp2));
HloCSE cse(/*is_layout_sensitive=*/true);
EXPECT_FALSE(cse.Run(&module).ValueOrDie());
EXPECT_EQ(4, computation->instruction_count());
- EXPECT_NE(tuple->operand(0), tuple->operand(1));
+ EXPECT_THAT(tuple, op::Tuple(exp1, exp2));
}
TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsInsensitive) {
@@ -280,13 +284,15 @@ TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsInsensitive) {
auto computation = module.AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
- EXPECT_NE(tuple->operand(0), tuple->operand(1));
+ EXPECT_THAT(tuple, op::Tuple(exp1, exp2));
HloCSE cse(/*is_layout_sensitive=*/false);
EXPECT_TRUE(cse.Run(&module).ValueOrDie());
EXPECT_EQ(3, computation->instruction_count());
- EXPECT_EQ(tuple->operand(0), tuple->operand(1));
+ auto first_operand = tuple->operand(0);
+ EXPECT_THAT(first_operand, ::testing::AnyOf(exp1, exp2));
+ EXPECT_THAT(tuple, op::Tuple(first_operand, first_operand));
}
TEST_F(HloCseTest, IdenticalExpressions) {
@@ -328,14 +334,15 @@ TEST_F(HloCseTest, IdenticalExpressions) {
auto computation = module.AddEntryComputation(builder.Build());
EXPECT_EQ(8, computation->instruction_count());
- EXPECT_NE(tuple->operand(0), tuple->operand(1));
+ EXPECT_THAT(tuple, op::Tuple(op::Add(negate1, exp1), op::Add(negate2, exp2)));
HloCSE cse(/*is_layout_sensitive=*/false);
EXPECT_TRUE(cse.Run(&module).ValueOrDie());
EXPECT_EQ(5, computation->instruction_count());
- EXPECT_EQ(tuple->operand(0), tuple->operand(1));
- EXPECT_EQ(HloOpcode::kAdd, tuple->operand(0)->opcode());
+ auto operand = tuple->operand(0);
+ EXPECT_THAT(tuple, op::Tuple(operand, operand));
+ EXPECT_THAT(operand, op::Add(op::Negate(), op::Exp()));
}
TEST_F(HloCseTest, DoNotCombineRng) {
@@ -351,12 +358,16 @@ TEST_F(HloCseTest, DoNotCombineRng) {
auto rng2 = builder.AddInstruction(HloInstruction::CreateRng(
ShapeUtil::MakeShape(F32, {}), RandomDistribution::RNG_UNIFORM,
{constant1, constant2}));
+
builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, rng1, rng2));
auto module = MakeUnique<HloModule>(TestName());
auto computation = module->AddEntryComputation(builder.Build());
+ HloInstruction* root = computation->root_instruction();
+ EXPECT_THAT(root, op::Add(rng1, rng2));
+
uint32 count_before = computation->instruction_count();
HloCSE cse(/*is_layout_sensitive=*/false);
@@ -364,11 +375,8 @@ TEST_F(HloCseTest, DoNotCombineRng) {
uint32 count_after = computation->instruction_count();
EXPECT_EQ(count_before, count_after);
- HloInstruction* root = computation->root_instruction();
- EXPECT_EQ(root->opcode(), HloOpcode::kAdd);
- EXPECT_EQ(root->operand(0)->opcode(), HloOpcode::kRng);
- EXPECT_EQ(root->operand(1)->opcode(), HloOpcode::kRng);
- EXPECT_NE(root->operand(0), root->operand(1));
+ root = computation->root_instruction();
+ EXPECT_THAT(root, op::Add(rng1, rng2));
}
// TODO(b/28245743): Handle impure functions correctly in CSE.
@@ -412,16 +420,17 @@ TEST_F(HloCseTest, DISABLED_DoNotCombineCallsToImpureFunctions) {
}
EXPECT_EQ(4, computation->instruction_count());
+ HloInstruction* root = computation->root_instruction();
+ EXPECT_THAT(root, op::Add(op::Map(), op::Map()));
HloCSE cse(/*is_layout_sensitive=*/false);
EXPECT_TRUE(cse.Run(module.get()).ValueOrDie());
EXPECT_EQ(4, computation->instruction_count());
- HloInstruction* root = computation->root_instruction();
- EXPECT_EQ(root->opcode(), HloOpcode::kAdd);
- EXPECT_EQ(root->operand(0)->opcode(), HloOpcode::kMap);
- EXPECT_EQ(root->operand(1)->opcode(), HloOpcode::kMap);
- EXPECT_NE(root->operand(0), root->operand(1));
+ root = computation->root_instruction();
+ auto operand = root->operand(0)->operand(0);
+ EXPECT_THAT(operand, op::Map());
+ EXPECT_THAT(root, op::Add(operand, operand));
}
} // namespace
diff --git a/tensorflow/compiler/xla/service/layout_assignment_test.cc b/tensorflow/compiler/xla/service/layout_assignment_test.cc
index b6451738bd..dd72566ac0 100644
--- a/tensorflow/compiler/xla/service/layout_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/layout_assignment_test.cc
@@ -45,6 +45,8 @@ namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
+using ::testing::ElementsAre;
+
class LayoutAssignmentTest : public HloTestBase {
protected:
void AssignLayouts(HloModule* module,
@@ -421,8 +423,8 @@ TEST_F(LayoutAssignmentTest, BroadcastAndTranspose) {
ShapeLayout(output_shape_with_layout);
AssignLayouts(&module, &computation_layout);
- EXPECT_TRUE(
- ContainersEqual(broadcast->shape().layout().minor_to_major(), {0, 1, 2}));
+ EXPECT_THAT(broadcast->shape().layout().minor_to_major(),
+ ElementsAre(0, 1, 2));
}
TEST_F(LayoutAssignmentTest, ReshapeOperandHasMultipleUsers) {
@@ -474,11 +476,9 @@ TEST_F(LayoutAssignmentTest, ReshapeOperandHasMultipleUsers) {
{transpose_shape_with_layout, broadcast2_shape_with_layout}));
AssignLayouts(&module, &computation_layout);
- EXPECT_TRUE(
- ContainersEqual(broadcast->shape().layout().minor_to_major(), {0, 1}));
- EXPECT_TRUE(
- ContainersEqual(transpose->shape().layout().minor_to_major(), {1, 0}));
- EXPECT_TRUE(ContainersEqual(tanh->shape().layout().minor_to_major(), {0, 1}));
+ EXPECT_THAT(broadcast->shape().layout().minor_to_major(), ElementsAre(0, 1));
+ EXPECT_THAT(transpose->shape().layout().minor_to_major(), ElementsAre(1, 0));
+ EXPECT_THAT(tanh->shape().layout().minor_to_major(), ElementsAre(0, 1));
}
// Add test which fails due to copy tuple.
diff --git a/tensorflow/compiler/xla/shape_util_test.cc b/tensorflow/compiler/xla/shape_util_test.cc
index b0a4b0c9a7..1f1f71d97c 100644
--- a/tensorflow/compiler/xla/shape_util_test.cc
+++ b/tensorflow/compiler/xla/shape_util_test.cc
@@ -24,6 +24,8 @@ limitations under the License.
namespace xla {
namespace {
+using ::testing::ElementsAre;
+
TEST(ShapeUtilTest, GetDimensionHelperCanNegativeIndex) {
Shape matrix = ShapeUtil::MakeShape(F32, {2, 3});
EXPECT_EQ(3, ShapeUtil::GetDimension(matrix, -1));
@@ -446,21 +448,21 @@ TEST(ShapeUtilTest, InsertedOrDeleted1SizedDimensions) {
TEST(ShapeUtilTest, DimensionsUnmodifiedByReshape_1x1x1x1_to_1x1x1) {
// All output dimensions should be unmodified. One of the input dimensions is
// modified because the input rank is larger by one.
- EXPECT_EQ(3,
- ShapeUtil::DimensionsUnmodifiedByReshape(
- ShapeUtil::MakeShape(S32, {1, 1, 1, 1}),
- ShapeUtil::MakeShape(S32, {1, 1, 1}))
- .size());
+ EXPECT_THAT(ShapeUtil::DimensionsUnmodifiedByReshape(
+ ShapeUtil::MakeShape(S32, {1, 1, 1, 1}),
+ ShapeUtil::MakeShape(S32, {1, 1, 1})),
+ ElementsAre(std::make_pair(0, 0), std::make_pair(1, 1),
+ std::make_pair(2, 2)));
}
TEST(ShapeUtilTest, DimensionsUnmodifiedByReshape_1x1x1_to_1x1x1x1) {
// All input dimensions should be unmodified. One of the output dimensions is
// modified because the output rank is larger by one.
- EXPECT_EQ(3,
- ShapeUtil::DimensionsUnmodifiedByReshape(
- ShapeUtil::MakeShape(S32, {1, 1, 1}),
- ShapeUtil::MakeShape(S32, {1, 1, 1, 1}))
- .size());
+ EXPECT_THAT(ShapeUtil::DimensionsUnmodifiedByReshape(
+ ShapeUtil::MakeShape(S32, {1, 1, 1}),
+ ShapeUtil::MakeShape(S32, {1, 1, 1, 1})),
+ ElementsAre(std::make_pair(0, 0), std::make_pair(1, 1),
+ std::make_pair(2, 2)));
}
TEST(ShapeUtilTest, DimensionsUnmodifiedByReshape_4x1x3x5x6x7_to_2x6x1x5x1x42) {
@@ -468,11 +470,10 @@ TEST(ShapeUtilTest, DimensionsUnmodifiedByReshape_4x1x3x5x6x7_to_2x6x1x5x1x42) {
// 4, 1, 3, 5, 6, 7
// |
// 2, 6, 1, 5, 1, 42
- EXPECT_TRUE(
- ContainersEqual(ShapeUtil::DimensionsUnmodifiedByReshape(
- ShapeUtil::MakeShape(S32, {4, 1, 3, 5, 6, 7}),
- ShapeUtil::MakeShape(S32, {2, 6, 1, 5, 1, 42})),
- std::vector<std::pair<int64, int64>>({{3, 3}})));
+ EXPECT_THAT(ShapeUtil::DimensionsUnmodifiedByReshape(
+ ShapeUtil::MakeShape(S32, {4, 1, 3, 5, 6, 7}),
+ ShapeUtil::MakeShape(S32, {2, 6, 1, 5, 1, 42})),
+ ElementsAre(std::make_pair(3, 3)));
}
TEST(ShapeUtilTest, ReshapeIsBitcast_3x4_6x2) {
diff --git a/tensorflow/compiler/xla/tests/prng_test.cc b/tensorflow/compiler/xla/tests/prng_test.cc
index 0cd0f97b06..5a6aa467e5 100644
--- a/tensorflow/compiler/xla/tests/prng_test.cc
+++ b/tensorflow/compiler/xla/tests/prng_test.cc
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
#include "tensorflow/compiler/xla/tests/test_macros.h"
#include "tensorflow/compiler/xla/util.h"
@@ -55,7 +56,7 @@ void PrngTest::UniformTest(T a, T b, tensorflow::gtl::ArraySlice<int64> dims) {
SetSeed(42);
auto actual = ExecuteAndTransferOrDie(&builder, /*arguments=*/{});
- EXPECT_TRUE(ContainersEqual(dims, actual->shape().dimensions()));
+ EXPECT_THAT(dims, ::testing::ElementsAreArray(actual->shape().dimensions()));
LiteralUtil::EachCell<T>(*actual,
[=](tensorflow::gtl::ArraySlice<int64>, T value) {
EXPECT_LE(a, value);
@@ -75,7 +76,7 @@ void PrngTest::BernoulliTest(float p, tensorflow::gtl::ArraySlice<int64> dims) {
auto actual,
client_->ExecuteAndTransfer(computation, /*arguments=*/{},
&execution_options));
- EXPECT_TRUE(ContainersEqual(dims, actual->shape().dimensions()));
+ EXPECT_THAT(dims, ::testing::ElementsAreArray(actual->shape().dimensions()));
int32 sum = 0;
LiteralUtil::EachCell<uint32>(
*actual, [&sum](tensorflow::gtl::ArraySlice<int64>, uint32 value) {