diff options
author | Gael Guennebaud <g.gael@free.fr> | 2018-07-19 13:15:40 +0200 |
---|---|---|
committer | Gael Guennebaud <g.gael@free.fr> | 2018-07-19 13:15:40 +0200 |
commit | 7ccb623746ea36013689dbdf61f6ce50948e6c29 (patch) | |
tree | 26b3f99153ead3064a40e7ebcbe7bcb5658ba768 /unsupported/test/cxx11_tensor_reduction.cpp | |
parent | 1f523e73042074fcaea6d9991bc2bb6e4f96a1ab (diff) |
bug #1569: fix Tensor<half>::mean() on AVX with respective unit test.
Diffstat (limited to 'unsupported/test/cxx11_tensor_reduction.cpp')
-rw-r--r-- | unsupported/test/cxx11_tensor_reduction.cpp | 41 |
1 file changed, 21 insertions, 20 deletions
diff --git a/unsupported/test/cxx11_tensor_reduction.cpp b/unsupported/test/cxx11_tensor_reduction.cpp index 4c8a95c34..ff8e18c07 100644 --- a/unsupported/test/cxx11_tensor_reduction.cpp +++ b/unsupported/test/cxx11_tensor_reduction.cpp @@ -53,20 +53,20 @@ static void test_trivial_reductions() { } } -template <int DataLayout> +template <typename Scalar,int DataLayout> static void test_simple_reductions() { - Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7); + Tensor<Scalar, 4, DataLayout> tensor(2, 3, 5, 7); tensor.setRandom(); array<ptrdiff_t, 2> reduction_axis2; reduction_axis2[0] = 1; reduction_axis2[1] = 3; - Tensor<float, 2, DataLayout> result = tensor.sum(reduction_axis2); + Tensor<Scalar, 2, DataLayout> result = tensor.sum(reduction_axis2); VERIFY_IS_EQUAL(result.dimension(0), 2); VERIFY_IS_EQUAL(result.dimension(1), 5); for (int i = 0; i < 2; ++i) { for (int j = 0; j < 5; ++j) { - float sum = 0.0f; + Scalar sum = Scalar(0.0f); for (int k = 0; k < 3; ++k) { for (int l = 0; l < 7; ++l) { sum += tensor(i, k, j, l); @@ -77,7 +77,7 @@ static void test_simple_reductions() { } { - Tensor<float, 0, DataLayout> sum1 = tensor.sum(); + Tensor<Scalar, 0, DataLayout> sum1 = tensor.sum(); VERIFY_IS_EQUAL(sum1.rank(), 0); array<ptrdiff_t, 4> reduction_axis4; @@ -85,7 +85,7 @@ static void test_simple_reductions() { reduction_axis4[1] = 1; reduction_axis4[2] = 2; reduction_axis4[3] = 3; - Tensor<float, 0, DataLayout> sum2 = tensor.sum(reduction_axis4); + Tensor<Scalar, 0, DataLayout> sum2 = tensor.sum(reduction_axis4); VERIFY_IS_EQUAL(sum2.rank(), 0); VERIFY_IS_APPROX(sum1(), sum2()); @@ -98,7 +98,7 @@ static void test_simple_reductions() { VERIFY_IS_EQUAL(result.dimension(1), 7); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 7; ++j) { - float prod = 1.0f; + Scalar prod = Scalar(1.0f); for (int k = 0; k < 2; ++k) { for (int l = 0; l < 5; ++l) { prod *= tensor(k, i, l, j); @@ -109,7 +109,7 @@ static void test_simple_reductions() { } { - Tensor<float, 0, DataLayout> prod1 
= tensor.prod(); + Tensor<Scalar, 0, DataLayout> prod1 = tensor.prod(); VERIFY_IS_EQUAL(prod1.rank(), 0); array<ptrdiff_t, 4> reduction_axis4; @@ -117,7 +117,7 @@ static void test_simple_reductions() { reduction_axis4[1] = 1; reduction_axis4[2] = 2; reduction_axis4[3] = 3; - Tensor<float, 0, DataLayout> prod2 = tensor.prod(reduction_axis4); + Tensor<Scalar, 0, DataLayout> prod2 = tensor.prod(reduction_axis4); VERIFY_IS_EQUAL(prod2.rank(), 0); VERIFY_IS_APPROX(prod1(), prod2()); @@ -130,7 +130,7 @@ static void test_simple_reductions() { VERIFY_IS_EQUAL(result.dimension(1), 7); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 7; ++j) { - float max_val = std::numeric_limits<float>::lowest(); + Scalar max_val = std::numeric_limits<Scalar>::lowest(); for (int k = 0; k < 2; ++k) { for (int l = 0; l < 5; ++l) { max_val = (std::max)(max_val, tensor(k, i, l, j)); @@ -141,7 +141,7 @@ static void test_simple_reductions() { } { - Tensor<float, 0, DataLayout> max1 = tensor.maximum(); + Tensor<Scalar, 0, DataLayout> max1 = tensor.maximum(); VERIFY_IS_EQUAL(max1.rank(), 0); array<ptrdiff_t, 4> reduction_axis4; @@ -149,7 +149,7 @@ static void test_simple_reductions() { reduction_axis4[1] = 1; reduction_axis4[2] = 2; reduction_axis4[3] = 3; - Tensor<float, 0, DataLayout> max2 = tensor.maximum(reduction_axis4); + Tensor<Scalar, 0, DataLayout> max2 = tensor.maximum(reduction_axis4); VERIFY_IS_EQUAL(max2.rank(), 0); VERIFY_IS_APPROX(max1(), max2()); @@ -162,7 +162,7 @@ static void test_simple_reductions() { VERIFY_IS_EQUAL(result.dimension(1), 7); for (int i = 0; i < 5; ++i) { for (int j = 0; j < 7; ++j) { - float min_val = (std::numeric_limits<float>::max)(); + Scalar min_val = (std::numeric_limits<Scalar>::max)(); for (int k = 0; k < 2; ++k) { for (int l = 0; l < 3; ++l) { min_val = (std::min)(min_val, tensor(k, l, i, j)); @@ -173,7 +173,7 @@ static void test_simple_reductions() { } { - Tensor<float, 0, DataLayout> min1 = tensor.minimum(); + Tensor<Scalar, 0, DataLayout> min1 = 
tensor.minimum(); VERIFY_IS_EQUAL(min1.rank(), 0); array<ptrdiff_t, 4> reduction_axis4; @@ -181,7 +181,7 @@ static void test_simple_reductions() { reduction_axis4[1] = 1; reduction_axis4[2] = 2; reduction_axis4[3] = 3; - Tensor<float, 0, DataLayout> min2 = tensor.minimum(reduction_axis4); + Tensor<Scalar, 0, DataLayout> min2 = tensor.minimum(reduction_axis4); VERIFY_IS_EQUAL(min2.rank(), 0); VERIFY_IS_APPROX(min1(), min2()); @@ -194,7 +194,7 @@ static void test_simple_reductions() { VERIFY_IS_EQUAL(result.dimension(1), 7); for (int i = 0; i < 5; ++i) { for (int j = 0; j < 7; ++j) { - float sum = 0.0f; + Scalar sum = Scalar(0.0f); int count = 0; for (int k = 0; k < 2; ++k) { for (int l = 0; l < 3; ++l) { @@ -207,7 +207,7 @@ static void test_simple_reductions() { } { - Tensor<float, 0, DataLayout> mean1 = tensor.mean(); + Tensor<Scalar, 0, DataLayout> mean1 = tensor.mean(); VERIFY_IS_EQUAL(mean1.rank(), 0); array<ptrdiff_t, 4> reduction_axis4; @@ -215,7 +215,7 @@ static void test_simple_reductions() { reduction_axis4[1] = 1; reduction_axis4[2] = 2; reduction_axis4[3] = 3; - Tensor<float, 0, DataLayout> mean2 = tensor.mean(reduction_axis4); + Tensor<Scalar, 0, DataLayout> mean2 = tensor.mean(reduction_axis4); VERIFY_IS_EQUAL(mean2.rank(), 0); VERIFY_IS_APPROX(mean1(), mean2()); @@ -487,8 +487,9 @@ static void test_reduce_middle_dims() { EIGEN_DECLARE_TEST(cxx11_tensor_reduction) { CALL_SUBTEST(test_trivial_reductions<ColMajor>()); CALL_SUBTEST(test_trivial_reductions<RowMajor>()); - CALL_SUBTEST(test_simple_reductions<ColMajor>()); - CALL_SUBTEST(test_simple_reductions<RowMajor>()); + CALL_SUBTEST(( test_simple_reductions<float,ColMajor>() )); + CALL_SUBTEST(( test_simple_reductions<float,RowMajor>() )); + CALL_SUBTEST(( test_simple_reductions<Eigen::half,ColMajor>() )); CALL_SUBTEST(test_reductions_in_expr<ColMajor>()); CALL_SUBTEST(test_reductions_in_expr<RowMajor>()); CALL_SUBTEST(test_full_reductions<ColMajor>()); |