author    Benoit Steiner <benoit.steiner.goog@gmail.com>  2017-01-30 15:25:57 -0800
committer Benoit Steiner <benoit.steiner.goog@gmail.com>  2017-01-30 15:25:57 -0800
commit    fbc39fd02c642119a2c49e517e1cd6e8fa1a008f (patch)
tree      6cf2142e4b740eb440c577ca08114e4e24912f91 /unsupported/test
parent    82ce92419e25d8b9902c0f39e2e3b01787bf8687 (diff)
parent    63de19c0004933c7b2b1e418292b9f2ae6c138f4 (diff)
Merge latest changes from upstream
Diffstat (limited to 'unsupported/test')
-rw-r--r--  unsupported/test/CMakeLists.txt         | 11
-rw-r--r--  unsupported/test/cxx11_tensor_expr.cpp  | 46
2 files changed, 57 insertions, 0 deletions
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index cf07b033d..9fa479f52 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -21,6 +21,17 @@ include_directories(../../test ../../unsupported ../../Eigen
find_package (Threads)
+find_package(Xsmm)
+if(XSMM_FOUND)
+ add_definitions("-DEIGEN_USE_LIBXSMM")
+ include_directories(${XSMM_INCLUDES})
+ link_directories(${XSMM_LIBRARIES})
+ set(EXTERNAL_LIBS ${EXTERNAL_LIBS} xsmm)
+ ei_add_property(EIGEN_TESTED_BACKENDS "Xsmm, ")
+else(XSMM_FOUND)
+ ei_add_property(EIGEN_MISSING_BACKENDS "Xsmm, ")
+endif(XSMM_FOUND)
+
find_package(GoogleHash)
if(GOOGLEHASH_FOUND)
add_definitions("-DEIGEN_GOOGLEHASH_SUPPORT")
diff --git a/unsupported/test/cxx11_tensor_expr.cpp b/unsupported/test/cxx11_tensor_expr.cpp
index 77e24cb67..129b4e659 100644
--- a/unsupported/test/cxx11_tensor_expr.cpp
+++ b/unsupported/test/cxx11_tensor_expr.cpp
@@ -300,6 +300,51 @@ static void test_select()
}
}
+template <typename Scalar>
+void test_minmax_nan_propagation_templ() {
+ for (int size = 1; size < 17; ++size) {
+ const Scalar kNan = std::numeric_limits<Scalar>::quiet_NaN();
+ Tensor<Scalar, 1> vec_nan(size);
+ Tensor<Scalar, 1> vec_zero(size);
+ Tensor<Scalar, 1> vec_res(size);
+ vec_nan.setConstant(kNan);
+ vec_zero.setZero();
+ vec_res.setZero();
+
+ // Test that we propagate NaNs in the tensor when applying the
+ // cwiseMax(scalar) operator, which is used for the Relu operator.
+ vec_res = vec_nan.cwiseMax(Scalar(0));
+ for (int i = 0; i < size; ++i) {
+ VERIFY((numext::isnan)(vec_res(i)));
+ }
+
+ // Test that NaNs do not propagate if we reverse the arguments.
+ vec_res = vec_zero.cwiseMax(kNan);
+ for (int i = 0; i < size; ++i) {
+ VERIFY_IS_EQUAL(vec_res(i), Scalar(0));
+ }
+
+ // Test that we propagate NaNs in the tensor when applying the
+ // cwiseMin(scalar) operator.
+ vec_res.setZero();
+ vec_res = vec_nan.cwiseMin(Scalar(0));
+ for (int i = 0; i < size; ++i) {
+ VERIFY((numext::isnan)(vec_res(i)));
+ }
+
+ // Test that NaNs do not propagate if we reverse the arguments.
+ vec_res = vec_zero.cwiseMin(kNan);
+ for (int i = 0; i < size; ++i) {
+ VERIFY_IS_EQUAL(vec_res(i), Scalar(0));
+ }
+ }
+}
+
+static void test_minmax_nan_propagation()
+{
+ test_minmax_nan_propagation_templ<float>();
+ test_minmax_nan_propagation_templ<double>();
+}
void test_cxx11_tensor_expr()
{
@@ -311,4 +356,5 @@ void test_cxx11_tensor_expr()
CALL_SUBTEST(test_functors());
CALL_SUBTEST(test_type_casting());
CALL_SUBTEST(test_select());
+ CALL_SUBTEST(test_minmax_nan_propagation());
}
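
Outside the Eigen test harness, the asymmetry exercised by test_minmax_nan_propagation can be reproduced in a few lines. The following standalone sketch is not part of the commit; it assumes the unsupported Tensor headers are reachable as <unsupported/Eigen/CXX11/Tensor> and uses std::isnan in place of the VERIFY macros:

// Sketch of the behaviour checked by test_minmax_nan_propagation:
// a NaN stored in the tensor survives cwiseMax/cwiseMin with a scalar
// argument, while a NaN passed as the scalar does not propagate.
#include <unsupported/Eigen/CXX11/Tensor>
#include <cmath>
#include <iostream>
#include <limits>

int main() {
  const float kNan = std::numeric_limits<float>::quiet_NaN();

  Eigen::Tensor<float, 1> vec_nan(4);
  Eigen::Tensor<float, 1> vec_zero(4);
  Eigen::Tensor<float, 1> vec_res(4);
  vec_nan.setConstant(kNan);
  vec_zero.setZero();

  // NaN lives in the tensor: cwiseMax(0) keeps it (the Relu case).
  vec_res = vec_nan.cwiseMax(0.0f);
  std::cout << "nan_tensor.cwiseMax(0)    -> isnan: " << std::isnan(vec_res(0)) << "\n";  // prints 1

  // NaN is the scalar argument: the zeros stored in the tensor win.
  vec_res = vec_zero.cwiseMax(kNan);
  std::cout << "zero_tensor.cwiseMax(NaN) -> isnan: " << std::isnan(vec_res(0)) << "\n";  // prints 0
  return 0;
}

The same pattern holds for cwiseMin, which is why the test checks both operators in both argument orders.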