Diffstat (limited to 'tensorflow/core/kernels/lrn_op_test.cc')
-rw-r--r--  tensorflow/core/kernels/lrn_op_test.cc  185
1 file changed, 185 insertions(+), 0 deletions(-)
diff --git a/tensorflow/core/kernels/lrn_op_test.cc b/tensorflow/core/kernels/lrn_op_test.cc
new file mode 100644
index 0000000000..4c338b6cb3
--- /dev/null
+++ b/tensorflow/core/kernels/lrn_op_test.cc
@@ -0,0 +1,185 @@
+#include <algorithm>
+#include <cmath>
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include <gtest/gtest.h>
+#include "tensorflow/core/framework/allocator.h"
+#include "tensorflow/core/framework/fake_input.h"
+#include "tensorflow/core/framework/graph.pb.h"
+#include "tensorflow/core/framework/node_def_builder.h"
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/tensor_testutil.h"
+#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/kernels/ops_testutil.h"
+#include "tensorflow/core/kernels/ops_util.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/lib/random/simple_philox.h"
+#include "tensorflow/core/public/tensor.h"
+
+namespace tensorflow {
+
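+// Absolute tolerance used when comparing the kernel output against the
+// reference values computed in the tests below.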
+static const float tol_ = 1e-4;
+
+class LRNFloatTest : public OpsTestBase {
+ protected:
+ LRNFloatTest() : philox_(123, 17), rand_(&philox_) { RequireDefaultOps(); }
+
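+  // Helpers that read attrs back from the NodeDef under test.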
+ int GetIntAttr(const string& name) {
+ int value;
+ TF_CHECK_OK(GetNodeAttr(*node_def(), name, &value));
+ return value;
+ }
+
+ float GetFloatAttr(const string& name) {
+ float value;
+ TF_CHECK_OK(GetNodeAttr(*node_def(), name, &value));
+ return value;
+ }
+
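+  // Recomputes the LRN output in plain C++ and returns true iff every element
+  // of the kernel's output is within tol_ of this reference. For each
+  // position i and depth index d the reference is
+  //   out(i, d) = in(i, d) / (bias + alpha * sum_r in(i, r)^2)^beta,
+  // where r ranges over [d - depth_radius, d + depth_radius], clipped to
+  // [0, depth).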
+ bool Compare() {
+ const auto& input = GetInput(0);
+ const int64 batch_size = input.dim_size(0);
+ const int64 rows = input.dim_size(1);
+ const int64 cols = input.dim_size(2);
+ const int64 depth = input.dim_size(3);
+ const int64 rest = cols * rows * batch_size;
+
+ const int64 depth_radius = GetIntAttr("depth_radius");
+ const float bias = GetFloatAttr("bias");
+ const float alpha = GetFloatAttr("alpha");
+ const float beta = GetFloatAttr("beta");
+
+ Eigen::Tensor<float, 4, Eigen::RowMajor> expected(batch_size, rows, cols,
+ depth);
+ auto out = expected.reshape(Eigen::DSizes<int64, 2>{rest, depth});
+ auto in = input.shaped<float, 2>({rest, depth});
+
+ for (int64 i = 0; i < rest; ++i) {
+ Eigen::Tensor<float, 1, Eigen::RowMajor> out_col(depth);
+ for (int64 d = 0; d < depth; ++d) {
+ float denom = 0.0f;
+ for (int64 r = std::max(0ll, d - depth_radius);
+ r < std::min(depth, d + depth_radius + 1); ++r) {
+ denom += in(i, r) * in(i, r);
+ }
+ denom = std::pow(denom * alpha + bias, beta);
+ out_col(d) = in(i, d) / denom;
+ }
+ out.chip<0>(i) = out_col;
+ }
+ auto actual = GetOutput(0)->tensor<float, 4>();
+ Eigen::Tensor<float, 0, Eigen::RowMajor> sum =
+ ((expected - actual).abs() > actual.constant(tol_))
+ .select(actual.constant(1), actual.constant(0))
+ .sum();
+ return sum() == 0;
+ }
+
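+  // Random number generators used to create the Gaussian inputs for the
+  // TCASE tests below.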
+ random::PhiloxRandom philox_;
+ random::SimplePhilox rand_;
+};
+
+TEST_F(LRNFloatTest, Depth96) {
+ ASSERT_OK(NodeDefBuilder("lrn_op", "LRN")
+ .Input(FakeInput())
+ .Attr("depth_radius", 5)
+ .Attr("bias", 1.0f)
+ .Attr("alpha", 0.1f)
+ .Attr("beta", 2.0f)
+ .Finalize(node_def()));
+ ASSERT_OK(InitOp());
+ AddInput<float>(TensorShape({1, 1, 1, 96}),
+ [this](int i) -> float { return i + 1; });
+ ASSERT_OK(RunOpKernel());
+ auto actual = GetOutput(0)->tensor<float, 4>();
+
+ // Output for Node 0 with Value 1:
+ // 1 / (1 + 0.1*(1^2 + 2^2 + 3^2 + 4^2 + 5^2 + 6^2))^2
+ EXPECT_NEAR(1. / (10.1 * 10.1), actual(0, 0, 0, 0), tol_);
+
+ // Output for Node 5 with Value 6:
+ // 6 / (1 + 0.1*(1^2 + 2^2 + 3^2 + 4^2 + 5^2 + 6^2 ... + 11^2))^2
+ EXPECT_NEAR(6. / (51.6 * 51.6), actual(0, 0, 0, 5), tol_);
+
+ // Output for Node 63 with value 64:
+ // 64 / (1 + 0.1*(59^2 + 60^2 + 61^2 + 62^2 + 63^2 + 64^2))^2
+ EXPECT_NEAR(64. / (2272.1 * 2272.1), actual(0, 0, 0, 63), tol_);
+
+ // Output for Node 64 with value 65:
+ // 65 / (1 + 0.1*(65^2 + 66^2 + 67^2 + 68^2 + 69^2 + 70^2))^2
+ EXPECT_NEAR(65. / (2736.5 * 2736.5), actual(0, 0, 0, 64), tol_);
+
+ // Output for Node 95 with value 96:
+ // 96 / (1 + 0.1*(91^2 + 92^2 + 93^2 + 94^2 + 95^2 + 96^2))^2
+ EXPECT_NEAR(96. / (5248.1 * 5248.1), actual(0, 0, 0, 95), tol_);
+ EXPECT_TRUE(Compare());
+}
+
+TEST_F(LRNFloatTest, Depth16) {
+ ASSERT_OK(NodeDefBuilder("lrn_op", "LRN")
+ .Input(FakeInput())
+ .Attr("depth_radius", 5)
+ .Attr("bias", 1.0f)
+ .Attr("alpha", 0.1f)
+ .Attr("beta", 2.0f)
+ .Finalize(node_def()));
+ ASSERT_OK(InitOp());
+ AddInput<float>(TensorShape({1, 1, 1, 16}),
+ [this](int i) -> float { return i + 1; });
+ ASSERT_OK(RunOpKernel());
+ auto actual = GetOutput(0)->tensor<float, 4>();
+
+ // Output for Node 0 with Value 1:
+ // 1 / (1 + 0.1*(1^2 + 2^2 + 3^2 + 4^2 + 5^2 + 6^2))^2
+ EXPECT_NEAR(1. / (10.1 * 10.1), actual(0, 0, 0, 0), tol_);
+
+ // Output for Node 5 with Value 6:
+ // 6 / (1 + 0.1*(1^2 + 2^2 + 3^2 + 4^2 + 5^2 + 6^2 ... + 11^2))^2
+ EXPECT_NEAR(6. / (51.6 * 51.6), actual(0, 0, 0, 5), tol_);
+
+ // Output for Node 15 with value 16:
+ // 16 / (1 + 0.1*(11^2 + 12^2 + 13^2 + 14^2 + 15^2 + 16^2))^2
+ EXPECT_NEAR(16. / (112.1 * 112.1), actual(0, 0, 0, 15), tol_);
+ EXPECT_TRUE(Compare());
+}
+
+static double RndGaussian(random::SimplePhilox* rnd) {
+  // Polar (Marsaglia) variant of the Box-Muller transformation: rejection-
+  // samples a point inside the unit disk and maps it to a standard normal.
+  // See, for example, http://www.taygeta.com/random/gaussian.html
+ double x1, x2;
+ double r;
+ do {
+ x1 = 2 * rnd->RandDouble() - 1;
+ x2 = 2 * rnd->RandDouble() - 1;
+ r = x1 * x1 + x2 * x2;
+ } while (r == 0 || r >= 1.0);
+ double w = sqrt(-2.0 * log(r) / r);
+ return x1 * w;
+}
+
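+// Expands to a randomized regression test: builds an LRN node with the given
+// attributes (note that ALPHA is divided by 10), feeds a {BATCH, 1, 1, DEPTH}
+// tensor of Gaussian samples, and checks the kernel output against the
+// reference computed by Compare().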
+#define TCASE(NAME, DEPTH, BATCH, DEPTH_RADIUS, BIAS, ALPHA, BETA) \
+ TEST_F(LRNFloatTest, NAME) { \
+ ASSERT_OK(NodeDefBuilder("lrn_op", "LRN") \
+ .Input(FakeInput()) \
+ .Attr("depth_radius", (DEPTH_RADIUS)) \
+ .Attr("bias", (BIAS)) \
+ .Attr("alpha", ((ALPHA) / 10)) \
+ .Attr("beta", (BETA)) \
+ .Finalize(node_def())); \
+ ASSERT_OK(InitOp()); \
+ AddInput<float>(TensorShape({BATCH, 1, 1, DEPTH}), \
+ [this](int i) -> float { return RndGaussian(&rand_); }); \
+ ASSERT_OK(RunOpKernel()); \
+ EXPECT_TRUE(Compare()); \
+ }
+
+// clang-format off
+// DEPTH BATCH DEPTH_RADIUS BIAS ALPHA BETA
+TCASE(T0, 4, 2, 2, 1.0f, 1.0f, 2.0f)
+TCASE(T1, 16, 1, 5, 1.0f, 1.0f, 2.0f)
+TCASE(T2, 16, 32, 2, 1.0f, 2.0f, 1.0f)
+TCASE(T3, 128, 4, 3, 2.0f, 1.0f, 1.0f)
+// clang-format on
+
+#undef TCASE
+} // namespace tensorflow