author    Rasmus Munk Larsen <rmlarsen@google.com>  2020-04-24 17:29:25 -0700
committer Rasmus Munk Larsen <rmlarsen@google.com>  2020-04-28 16:12:47 +0000
commit    ab773c7e914633ec4a3ee1f7cdea8b168d3bce1a (patch)
tree      5a82a3071e3c5de39e934ce144023c20f62b2de4 /unsupported
parent    b47c7779937c1984b7cd2f1d2f8df33d67c396f7 (diff)
Extend support for Packet16b:
* Add ptranspose<*,4> to support matmul and add unit test for
  Matrix<bool> * Matrix<bool>
* Work around a bug in slicing of Tensor<bool>.
* Add tensor tests

This speeds up matmul for boolean matrices by about 10x:

name                 old time/op  new time/op  delta
BM_MatMul<bool>/8    267ns ± 0%   479ns ± 0%   +79.25%  (p=0.008 n=5+5)
BM_MatMul<bool>/32   6.42µs ± 0%  0.87µs ± 0%  -86.50%  (p=0.008 n=5+5)
BM_MatMul<bool>/64   43.3µs ± 0%  5.9µs ± 0%   -86.42%  (p=0.008 n=5+5)
BM_MatMul<bool>/128  315µs ± 0%   44µs ± 0%    -85.98%  (p=0.008 n=5+5)
BM_MatMul<bool>/256  2.41ms ± 0%  0.34ms ± 0%  -85.68%  (p=0.008 n=5+5)
BM_MatMul<bool>/512  18.8ms ± 0%  2.7ms ± 0%   -85.53%  (p=0.008 n=5+5)
BM_MatMul<bool>/1k   149ms ± 0%   22ms ± 0%    -85.40%  (p=0.008 n=5+5)
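For context, a minimal sketch of how such a boolean-matmul timing could be reproduced against this revision. This is not the actual harness behind BM_MatMul (that harness is not part of this commit); it only shows the Matrix<bool> product that the new Packet16b paths accelerate:

#include <Eigen/Dense>
#include <chrono>
#include <iostream>

int main() {
  const int n = 256;  // one of the sizes from the table above
  Eigen::Matrix<bool, Eigen::Dynamic, Eigen::Dynamic> a(n, n), b(n, n);
  a.setRandom();
  b.setRandom();
  const auto t0 = std::chrono::steady_clock::now();
  // Boolean matmul; this is the operation the Packet16b work vectorizes.
  Eigen::Matrix<bool, Eigen::Dynamic, Eigen::Dynamic> c = a * b;
  const auto t1 = std::chrono::steady_clock::now();
  std::cout << std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count()
            << "us, c(0,0)=" << c(0, 0) << "\n";
}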
Diffstat (limited to 'unsupported')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h |   9
-rw-r--r--  unsupported/test/cxx11_tensor_block_eval.cpp        |  94
-rw-r--r--  unsupported/test/cxx11_tensor_block_io.cpp          |  10
-rw-r--r--  unsupported/test/cxx11_tensor_contraction.cpp       |  68
-rw-r--r--  unsupported/test/cxx11_tensor_expr.cpp              |  25
-rw-r--r--  unsupported/test/cxx11_tensor_morphing.cpp          | 113
6 files changed, 180 insertions, 139 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
index d04dcae17..f107d1b19 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
@@ -456,7 +456,9 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
// slice offsets and sizes.
IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
- BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
+ BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess &&
+ // FIXME: Temporary workaround for bug in slicing of bool tensors.
+ !internal::is_same<typename internal::remove_const<Scalar>::type, bool>::value,
PreferBlockAccess = true,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false,
@@ -525,7 +527,6 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
-
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
m_impl.evalSubExprsIfNeeded(NULL);
if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization
@@ -559,14 +560,14 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
}
return true;
}
-
+
#ifdef EIGEN_USE_THREADS
template <typename EvalSubExprsCallback>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
EvaluatorPointerType data, EvalSubExprsCallback done) {
m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
}
-#endif // EIGEN_USE_THREADS
+#endif // EIGEN_USE_THREADS
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_impl.cleanup();
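The BlockAccess workaround in the hunk above hinges on a compile-time type test: strip const from Scalar, compare against bool, and disable block access on a match. A standalone illustration of the same check, using the std equivalents of Eigen's internal traits (illustrative only, not Eigen code):

#include <type_traits>

template <typename Scalar>
constexpr bool block_access_allowed(bool upstream) {
  // Mirrors the diff: keep upstream block access unless the (possibly
  // const-qualified) scalar type is bool.
  return upstream &&
         !std::is_same<typename std::remove_const<Scalar>::type, bool>::value;
}

static_assert(block_access_allowed<float>(true), "float slices keep block access");
static_assert(!block_access_allowed<const bool>(true), "bool slices fall back");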
diff --git a/unsupported/test/cxx11_tensor_block_eval.cpp b/unsupported/test/cxx11_tensor_block_eval.cpp
index 226c495aa..a7a49fa1f 100644
--- a/unsupported/test/cxx11_tensor_block_eval.cpp
+++ b/unsupported/test/cxx11_tensor_block_eval.cpp
@@ -233,7 +233,7 @@ static void test_eval_tensor_binary_expr_block() {
rhs.setRandom();
VerifyBlockEvaluator<T, NumDims, Layout>(
- lhs + rhs, [&dims]() { return RandomBlock<Layout>(dims, 1, 10); });
+ lhs * rhs, [&dims]() { return RandomBlock<Layout>(dims, 1, 10); });
}
template <typename T, int NumDims, int Layout>
@@ -274,7 +274,7 @@ static void test_eval_tensor_broadcast() {
// Check that desc.destination() memory is not shared between two broadcast
// materializations.
VerifyBlockEvaluator<T, NumDims, Layout>(
- input.broadcast(bcast) + input.square().broadcast(bcast),
+ input.broadcast(bcast) * input.square().broadcast(bcast),
[&bcasted_dims]() { return SkewedInnerBlock<Layout>(bcasted_dims); });
}
@@ -509,7 +509,7 @@ static void test_eval_tensor_reshape_with_bcast() {
DSizes<Index, 2> dims(dim, dim);
VerifyBlockEvaluator<T, 2, Layout>(
- lhs.reshape(reshapeLhs).broadcast(bcastLhs) +
+ lhs.reshape(reshapeLhs).broadcast(bcastLhs) *
rhs.reshape(reshapeRhs).broadcast(bcastRhs),
[dims]() { return SkewedInnerBlock<Layout, 2>(dims); });
}
@@ -529,11 +529,11 @@ static void test_eval_tensor_forced_eval() {
DSizes<Index, 2> dims(dim, dim);
VerifyBlockEvaluator<T, 2, Layout>(
- (lhs.broadcast(bcastLhs) + rhs.broadcast(bcastRhs)).eval().reshape(dims),
+ (lhs.broadcast(bcastLhs) * rhs.broadcast(bcastRhs)).eval().reshape(dims),
[dims]() { return SkewedInnerBlock<Layout, 2>(dims); });
VerifyBlockEvaluator<T, 2, Layout>(
- (lhs.broadcast(bcastLhs) + rhs.broadcast(bcastRhs)).eval().reshape(dims),
+ (lhs.broadcast(bcastLhs) * rhs.broadcast(bcastRhs)).eval().reshape(dims),
[dims]() { return RandomBlock<Layout, 2>(dims, 1, 50); });
}
@@ -755,7 +755,39 @@ static void test_assign_to_tensor_shuffle() {
#define CALL_SUBTEST_PART(PART) \
CALL_SUBTEST_##PART
-#define CALL_SUBTESTS_DIMS_LAYOUTS(PART, NAME) \
+#define CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(PART, NAME) \
+ CALL_SUBTEST_PART(PART)((NAME<float, 1, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 2, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 3, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 4, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 5, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 1, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 2, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 3, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 4, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 5, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 1, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 2, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 3, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 4, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 5, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 1, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 2, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 3, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 4, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 5, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 1, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 2, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 3, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 4, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 5, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 1, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 2, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 3, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 4, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 5, ColMajor>()))
+
+#define CALL_SUBTESTS_DIMS_LAYOUTS(PART, NAME) \
CALL_SUBTEST_PART(PART)((NAME<float, 1, RowMajor>())); \
CALL_SUBTEST_PART(PART)((NAME<float, 2, RowMajor>())); \
CALL_SUBTEST_PART(PART)((NAME<float, 3, RowMajor>())); \
@@ -767,36 +799,38 @@ static void test_assign_to_tensor_shuffle() {
CALL_SUBTEST_PART(PART)((NAME<float, 4, ColMajor>())); \
CALL_SUBTEST_PART(PART)((NAME<float, 5, ColMajor>()))
-#define CALL_SUBTESTS_LAYOUTS(PART, NAME) \
+#define CALL_SUBTESTS_LAYOUTS_TYPES(PART, NAME) \
CALL_SUBTEST_PART(PART)((NAME<float, RowMajor>())); \
- CALL_SUBTEST_PART(PART)((NAME<float, ColMajor>()))
+ CALL_SUBTEST_PART(PART)((NAME<float, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, ColMajor>()))
EIGEN_DECLARE_TEST(cxx11_tensor_block_eval) {
// clang-format off
- CALL_SUBTESTS_DIMS_LAYOUTS(1, test_eval_tensor_block);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(1, test_eval_tensor_block);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(1, test_eval_tensor_binary_expr_block);
CALL_SUBTESTS_DIMS_LAYOUTS(1, test_eval_tensor_unary_expr_block);
- CALL_SUBTESTS_DIMS_LAYOUTS(1, test_eval_tensor_binary_expr_block);
CALL_SUBTESTS_DIMS_LAYOUTS(2, test_eval_tensor_binary_with_unary_expr_block);
- CALL_SUBTESTS_DIMS_LAYOUTS(2, test_eval_tensor_broadcast);
- CALL_SUBTESTS_DIMS_LAYOUTS(2, test_eval_tensor_reshape);
- CALL_SUBTESTS_DIMS_LAYOUTS(3, test_eval_tensor_cast);
- CALL_SUBTESTS_DIMS_LAYOUTS(3, test_eval_tensor_select);
- CALL_SUBTESTS_DIMS_LAYOUTS(3, test_eval_tensor_padding);
- CALL_SUBTESTS_DIMS_LAYOUTS(4, test_eval_tensor_chipping);
- CALL_SUBTESTS_DIMS_LAYOUTS(4, test_eval_tensor_generator);
- CALL_SUBTESTS_DIMS_LAYOUTS(4, test_eval_tensor_reverse);
- CALL_SUBTESTS_DIMS_LAYOUTS(5, test_eval_tensor_slice);
- CALL_SUBTESTS_DIMS_LAYOUTS(5, test_eval_tensor_shuffle);
-
- CALL_SUBTESTS_LAYOUTS(6, test_eval_tensor_reshape_with_bcast);
- CALL_SUBTESTS_LAYOUTS(6, test_eval_tensor_forced_eval);
- CALL_SUBTESTS_LAYOUTS(6, test_eval_tensor_chipping_of_bcast);
-
- CALL_SUBTESTS_DIMS_LAYOUTS(7, test_assign_to_tensor);
- CALL_SUBTESTS_DIMS_LAYOUTS(7, test_assign_to_tensor_reshape);
- CALL_SUBTESTS_DIMS_LAYOUTS(7, test_assign_to_tensor_chipping);
- CALL_SUBTESTS_DIMS_LAYOUTS(8, test_assign_to_tensor_slice);
- CALL_SUBTESTS_DIMS_LAYOUTS(8, test_assign_to_tensor_shuffle);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(2, test_eval_tensor_broadcast);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(2, test_eval_tensor_reshape);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(3, test_eval_tensor_cast);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(3, test_eval_tensor_select);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(3, test_eval_tensor_padding);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(4, test_eval_tensor_chipping);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(4, test_eval_tensor_generator);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(4, test_eval_tensor_reverse);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(5, test_eval_tensor_slice);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(5, test_eval_tensor_shuffle);
+
+ CALL_SUBTESTS_LAYOUTS_TYPES(6, test_eval_tensor_reshape_with_bcast);
+ CALL_SUBTESTS_LAYOUTS_TYPES(6, test_eval_tensor_forced_eval);
+ CALL_SUBTESTS_LAYOUTS_TYPES(6, test_eval_tensor_chipping_of_bcast);
+
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(7, test_assign_to_tensor);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(7, test_assign_to_tensor_reshape);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(7, test_assign_to_tensor_chipping);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(8, test_assign_to_tensor_slice);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(8, test_assign_to_tensor_shuffle);
// Force CMake to split this test.
// EIGEN_SUFFIXES;1;2;3;4;5;6;7;8
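The CALL_SUBTEST_PART plumbing above relies on token pasting so one macro can dispatch to the numbered CALL_SUBTEST_<N> macros that CMake splits the test on. A minimal mock outside the Eigen test harness (the CALL_SUBTEST_1/2 bodies here are stand-ins, not Eigen's definitions):

#include <iostream>

#define CALL_SUBTEST_1(X) do { std::cout << "part 1: "; X; } while (0)
#define CALL_SUBTEST_2(X) do { std::cout << "part 2: "; X; } while (0)
// Token-paste PART onto CALL_SUBTEST_, exactly as in the test file above.
#define CALL_SUBTEST_PART(PART) CALL_SUBTEST_##PART

int main() {
  CALL_SUBTEST_PART(1)((std::cout << "ran\n"));
  CALL_SUBTEST_PART(2)((std::cout << "ran\n"));
}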
diff --git a/unsupported/test/cxx11_tensor_block_io.cpp b/unsupported/test/cxx11_tensor_block_io.cpp
index a6f7a44cd..52f7dde9b 100644
--- a/unsupported/test/cxx11_tensor_block_io.cpp
+++ b/unsupported/test/cxx11_tensor_block_io.cpp
@@ -415,7 +415,15 @@ static void test_block_io_squeeze_ones() {
CALL_SUBTEST((NAME<float, 1, ColMajor>())); \
CALL_SUBTEST((NAME<float, 2, ColMajor>())); \
CALL_SUBTEST((NAME<float, 4, ColMajor>())); \
- CALL_SUBTEST((NAME<float, 5, ColMajor>()))
+ CALL_SUBTEST((NAME<float, 5, ColMajor>())); \
+ CALL_SUBTEST((NAME<bool, 1, RowMajor>())); \
+ CALL_SUBTEST((NAME<bool, 2, RowMajor>())); \
+ CALL_SUBTEST((NAME<bool, 4, RowMajor>())); \
+ CALL_SUBTEST((NAME<bool, 5, RowMajor>())); \
+ CALL_SUBTEST((NAME<bool, 1, ColMajor>())); \
+ CALL_SUBTEST((NAME<bool, 2, ColMajor>())); \
+ CALL_SUBTEST((NAME<bool, 4, ColMajor>())); \
+ CALL_SUBTEST((NAME<bool, 5, ColMajor>()))
EIGEN_DECLARE_TEST(cxx11_tensor_block_io) {
// clang-format off
diff --git a/unsupported/test/cxx11_tensor_contraction.cpp b/unsupported/test/cxx11_tensor_contraction.cpp
index 2fd128121..3b5c6a13c 100644
--- a/unsupported/test/cxx11_tensor_contraction.cpp
+++ b/unsupported/test/cxx11_tensor_contraction.cpp
@@ -562,36 +562,40 @@ static void test_large_contraction_with_output_kernel() {
EIGEN_DECLARE_TEST(cxx11_tensor_contraction)
{
- CALL_SUBTEST(test_evals<ColMajor>());
- CALL_SUBTEST(test_evals<RowMajor>());
- CALL_SUBTEST(test_scalar<ColMajor>());
- CALL_SUBTEST(test_scalar<RowMajor>());
- CALL_SUBTEST(test_multidims<ColMajor>());
- CALL_SUBTEST(test_multidims<RowMajor>());
- CALL_SUBTEST(test_holes<ColMajor>());
- CALL_SUBTEST(test_holes<RowMajor>());
- CALL_SUBTEST(test_full_redux<ColMajor>());
- CALL_SUBTEST(test_full_redux<RowMajor>());
- CALL_SUBTEST(test_contraction_of_contraction<ColMajor>());
- CALL_SUBTEST(test_contraction_of_contraction<RowMajor>());
- CALL_SUBTEST(test_expr<ColMajor>());
- CALL_SUBTEST(test_expr<RowMajor>());
- CALL_SUBTEST(test_out_of_order_contraction<ColMajor>());
- CALL_SUBTEST(test_out_of_order_contraction<RowMajor>());
- CALL_SUBTEST(test_consistency<ColMajor>());
- CALL_SUBTEST(test_consistency<RowMajor>());
- CALL_SUBTEST(test_large_contraction<ColMajor>());
- CALL_SUBTEST(test_large_contraction<RowMajor>());
- CALL_SUBTEST(test_matrix_vector<ColMajor>());
- CALL_SUBTEST(test_matrix_vector<RowMajor>());
- CALL_SUBTEST(test_tensor_vector<ColMajor>());
- CALL_SUBTEST(test_tensor_vector<RowMajor>());
- CALL_SUBTEST(test_small_blocking_factors<ColMajor>());
- CALL_SUBTEST(test_small_blocking_factors<RowMajor>());
- CALL_SUBTEST(test_tensor_product<ColMajor>());
- CALL_SUBTEST(test_tensor_product<RowMajor>());
- CALL_SUBTEST(test_const_inputs<ColMajor>());
- CALL_SUBTEST(test_const_inputs<RowMajor>());
- CALL_SUBTEST(test_large_contraction_with_output_kernel<ColMajor>());
- CALL_SUBTEST(test_large_contraction_with_output_kernel<RowMajor>());
+ CALL_SUBTEST_1(test_evals<ColMajor>());
+ CALL_SUBTEST_1(test_evals<RowMajor>());
+ CALL_SUBTEST_1(test_scalar<ColMajor>());
+ CALL_SUBTEST_1(test_scalar<RowMajor>());
+ CALL_SUBTEST_2(test_multidims<ColMajor>());
+ CALL_SUBTEST_2(test_multidims<RowMajor>());
+ CALL_SUBTEST_2(test_holes<ColMajor>());
+ CALL_SUBTEST_2(test_holes<RowMajor>());
+ CALL_SUBTEST_3(test_full_redux<ColMajor>());
+ CALL_SUBTEST_3(test_full_redux<RowMajor>());
+ CALL_SUBTEST_3(test_contraction_of_contraction<ColMajor>());
+ CALL_SUBTEST_3(test_contraction_of_contraction<RowMajor>());
+ CALL_SUBTEST_4(test_expr<ColMajor>());
+ CALL_SUBTEST_4(test_expr<RowMajor>());
+ CALL_SUBTEST_4(test_out_of_order_contraction<ColMajor>());
+ CALL_SUBTEST_4(test_out_of_order_contraction<RowMajor>());
+ CALL_SUBTEST_5(test_consistency<ColMajor>());
+ CALL_SUBTEST_5(test_consistency<RowMajor>());
+ CALL_SUBTEST_5(test_large_contraction<ColMajor>());
+ CALL_SUBTEST_5(test_large_contraction<RowMajor>());
+ CALL_SUBTEST_6(test_matrix_vector<ColMajor>());
+ CALL_SUBTEST_6(test_matrix_vector<RowMajor>());
+ CALL_SUBTEST_6(test_tensor_vector<ColMajor>());
+ CALL_SUBTEST_6(test_tensor_vector<RowMajor>());
+ CALL_SUBTEST_7(test_small_blocking_factors<ColMajor>());
+ CALL_SUBTEST_7(test_small_blocking_factors<RowMajor>());
+ CALL_SUBTEST_7(test_tensor_product<ColMajor>());
+ CALL_SUBTEST_7(test_tensor_product<RowMajor>());
+ CALL_SUBTEST_8(test_const_inputs<ColMajor>());
+ CALL_SUBTEST_8(test_const_inputs<RowMajor>());
+ CALL_SUBTEST_8(test_large_contraction_with_output_kernel<ColMajor>());
+ CALL_SUBTEST_8(test_large_contraction_with_output_kernel<RowMajor>());
+
+ // Force CMake to split this test.
+ // EIGEN_SUFFIXES;1;2;3;4;5;6;7;8
+
}
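As a reminder of the API these subtests exercise, a minimal contraction example: pairing dimension 1 of the left tensor with dimension 0 of the right reproduces an ordinary matrix product (a self-contained sketch, not taken from the test file):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> a(2, 3), b(3, 4);
  a.setRandom();
  b.setRandom();
  // Contract a's dim 1 against b's dim 0, i.e. a 2x3 * 3x4 matrix product.
  Eigen::array<Eigen::IndexPair<int>, 1> dims = {Eigen::IndexPair<int>(1, 0)};
  Eigen::Tensor<float, 2> c = a.contract(b, dims);
  std::cout << "c is " << c.dimension(0) << "x" << c.dimension(1) << "\n";  // 2x4
}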
diff --git a/unsupported/test/cxx11_tensor_expr.cpp b/unsupported/test/cxx11_tensor_expr.cpp
index d56da28d8..6dc44996a 100644
--- a/unsupported/test/cxx11_tensor_expr.cpp
+++ b/unsupported/test/cxx11_tensor_expr.cpp
@@ -195,26 +195,23 @@ static void test_constants()
static void test_boolean()
{
- Tensor<int, 1> vec(31);
- std::iota(vec.data(), vec.data() + 31, 0);
+ const int kSize = 31;
+ Tensor<int, 1> vec(kSize);
+ std::iota(vec.data(), vec.data() + kSize, 0);
// Test ||.
Tensor<bool, 1> bool1 = vec < vec.constant(1) || vec > vec.constant(4);
- VERIFY_IS_EQUAL(bool1[0], true);
- VERIFY_IS_EQUAL(bool1[1], false);
- VERIFY_IS_EQUAL(bool1[2], false);
- VERIFY_IS_EQUAL(bool1[3], false);
- VERIFY_IS_EQUAL(bool1[4], false);
- VERIFY_IS_EQUAL(bool1[5], true);
+ for (int i = 0; i < kSize; ++i) {
+ bool expected = i < 1 || i > 4;
+ VERIFY_IS_EQUAL(bool1[i], expected);
+ }
// Test &&, including cast of operand vec.
Tensor<bool, 1> bool2 = vec.cast<bool>() && vec < vec.constant(4);
- VERIFY_IS_EQUAL(bool2[0], false);
- VERIFY_IS_EQUAL(bool2[1], true);
- VERIFY_IS_EQUAL(bool2[2], true);
- VERIFY_IS_EQUAL(bool2[3], true);
- VERIFY_IS_EQUAL(bool2[4], false);
- VERIFY_IS_EQUAL(bool2[5], false);
+ for (int i = 0; i < kSize; ++i) {
+ bool expected = bool(i) && i < 4;
+ VERIFY_IS_EQUAL(bool2[i], expected);
+ }
// Compilation tests:
// Test Tensor<bool> against results of cast or comparison; verifies that
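The rewritten test_boolean checks every element against the scalar expression it mimics instead of spot-checking indices. A self-contained sketch of the boolean tensor expressions involved, with the same shape as the test (assumes the unsupported Tensor module is on the include path):

#include <unsupported/Eigen/CXX11/Tensor>
#include <numeric>
#include <iostream>

int main() {
  const int kSize = 31;
  Eigen::Tensor<int, 1> vec(kSize);
  std::iota(vec.data(), vec.data() + kSize, 0);  // 0, 1, ..., 30
  // Comparisons yield Tensor<bool>; || and && combine them element-wise.
  Eigen::Tensor<bool, 1> bool1 = vec < vec.constant(1) || vec > vec.constant(4);
  Eigen::Tensor<bool, 1> bool2 = vec.cast<bool>() && vec < vec.constant(4);
  std::cout << bool1(0) << bool1(5) << " " << bool2(0) << bool2(1) << "\n";  // 11 01
}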
diff --git a/unsupported/test/cxx11_tensor_morphing.cpp b/unsupported/test/cxx11_tensor_morphing.cpp
index eb708737d..f01b95357 100644
--- a/unsupported/test/cxx11_tensor_morphing.cpp
+++ b/unsupported/test/cxx11_tensor_morphing.cpp
@@ -64,7 +64,7 @@ static void test_static_reshape() {
#endif
}
-template<typename>
+template <typename>
static void test_reshape_in_expr() {
MatrixXf m1(2,3*5*7*11);
MatrixXf m2(3*5*7*11,13);
@@ -113,19 +113,19 @@ static void test_reshape_as_lvalue()
}
}
-template<int DataLayout>
+template<typename T, int DataLayout>
static void test_simple_slice()
{
- Tensor<float, 5, DataLayout> tensor(2,3,5,7,11);
+ Tensor<T, 5, DataLayout> tensor(2,3,5,7,11);
tensor.setRandom();
- Tensor<float, 5, DataLayout> slice1(1,1,1,1,1);
+ Tensor<T, 5, DataLayout> slice1(1,1,1,1,1);
Eigen::DSizes<ptrdiff_t, 5> indices(1,2,3,4,5);
Eigen::DSizes<ptrdiff_t, 5> sizes(1,1,1,1,1);
slice1 = tensor.slice(indices, sizes);
VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5));
- Tensor<float, 5, DataLayout> slice2(1,1,2,2,3);
+ Tensor<T, 5, DataLayout> slice2(1,1,2,2,3);
Eigen::DSizes<ptrdiff_t, 5> indices2(1,1,3,4,5);
Eigen::DSizes<ptrdiff_t, 5> sizes2(1,1,2,2,3);
slice2 = tensor.slice(indices2, sizes2);
@@ -138,20 +138,20 @@ static void test_simple_slice()
}
}
-template<typename=void>
+template<typename T>
static void test_const_slice()
{
- const float b[1] = {42};
- TensorMap<Tensor<const float, 1> > m(b, 1);
+ const T b[1] = {42};
+ TensorMap<Tensor<const T, 1> > m(b, 1);
DSizes<DenseIndex, 1> offsets;
offsets[0] = 0;
- TensorRef<Tensor<const float, 1> > slice_ref(m.slice(offsets, m.dimensions()));
+ TensorRef<Tensor<const T, 1> > slice_ref(m.slice(offsets, m.dimensions()));
VERIFY_IS_EQUAL(slice_ref(0), 42);
}
-template<int DataLayout>
+template<typename T, int DataLayout>
static void test_slice_in_expr() {
- typedef Matrix<float, Dynamic, Dynamic, DataLayout> Mtx;
+ typedef Matrix<T, Dynamic, Dynamic, DataLayout> Mtx;
Mtx m1(7,7);
Mtx m2(3,3);
m1.setRandom();
@@ -159,10 +159,10 @@ static void test_slice_in_expr() {
Mtx m3 = m1.block(1, 2, 3, 3) * m2.block(0, 2, 3, 1);
- TensorMap<Tensor<float, 2, DataLayout>> tensor1(m1.data(), 7, 7);
- TensorMap<Tensor<float, 2, DataLayout>> tensor2(m2.data(), 3, 3);
- Tensor<float, 2, DataLayout> tensor3(3,1);
- typedef Tensor<float, 1>::DimensionPair DimPair;
+ TensorMap<Tensor<T, 2, DataLayout>> tensor1(m1.data(), 7, 7);
+ TensorMap<Tensor<T, 2, DataLayout>> tensor2(m2.data(), 3, 3);
+ Tensor<T, 2, DataLayout> tensor3(3,1);
+ typedef typename Tensor<T, 1>::DimensionPair DimPair;
array<DimPair, 1> contract_along{{DimPair(1, 0)}};
Eigen::DSizes<ptrdiff_t, 2> indices1(1,2);
@@ -179,28 +179,28 @@ static void test_slice_in_expr() {
}
// Take an arbitrary slice of an arbitrarily sized tensor.
- TensorMap<Tensor<const float, 2, DataLayout>> tensor4(m1.data(), 7, 7);
- Tensor<float, 1, DataLayout> tensor6 = tensor4.reshape(DSizes<ptrdiff_t, 1>(7*7)).exp().slice(DSizes<ptrdiff_t, 1>(0), DSizes<ptrdiff_t, 1>(35));
+ TensorMap<Tensor<const T, 2, DataLayout>> tensor4(m1.data(), 7, 7);
+ Tensor<T, 1, DataLayout> tensor6 = tensor4.reshape(DSizes<ptrdiff_t, 1>(7*7)).exp().slice(DSizes<ptrdiff_t, 1>(0), DSizes<ptrdiff_t, 1>(35));
for (int i = 0; i < 35; ++i) {
VERIFY_IS_APPROX(tensor6(i), expf(tensor4.data()[i]));
}
}
-template<int DataLayout>
+template<typename T, int DataLayout>
static void test_slice_as_lvalue()
{
- Tensor<float, 3, DataLayout> tensor1(2,2,7);
+ Tensor<T, 3, DataLayout> tensor1(2,2,7);
tensor1.setRandom();
- Tensor<float, 3, DataLayout> tensor2(2,2,7);
+ Tensor<T, 3, DataLayout> tensor2(2,2,7);
tensor2.setRandom();
- Tensor<float, 3, DataLayout> tensor3(4,3,5);
+ Tensor<T, 3, DataLayout> tensor3(4,3,5);
tensor3.setRandom();
- Tensor<float, 3, DataLayout> tensor4(4,3,2);
+ Tensor<T, 3, DataLayout> tensor4(4,3,2);
tensor4.setRandom();
- Tensor<float, 3, DataLayout> tensor5(10,13,12);
+ Tensor<T, 3, DataLayout> tensor5(10,13,12);
tensor5.setRandom();
- Tensor<float, 3, DataLayout> result(4,5,7);
+ Tensor<T, 3, DataLayout> result(4,5,7);
Eigen::DSizes<ptrdiff_t, 3> sizes12(2,2,7);
Eigen::DSizes<ptrdiff_t, 3> first_slice(0,0,0);
result.slice(first_slice, sizes12) = tensor1;
@@ -246,10 +246,10 @@ static void test_slice_as_lvalue()
}
}
-template<int DataLayout>
+template<typename T, int DataLayout>
static void test_slice_raw_data()
{
- Tensor<float, 4, DataLayout> tensor(3,5,7,11);
+ Tensor<T, 4, DataLayout> tensor(3,5,7,11);
tensor.setRandom();
Eigen::DSizes<ptrdiff_t, 4> offsets(1,2,3,4);
@@ -276,7 +276,7 @@ static void test_slice_raw_data()
extents = Eigen::DSizes<ptrdiff_t, 4>(1,2,1,1);
auto slice3 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice());
VERIFY_IS_EQUAL(slice3.dimensions().TotalSize(), 2);
- VERIFY_IS_EQUAL(slice3.data(), static_cast<float*>(0));
+ VERIFY_IS_EQUAL(slice3.data(), static_cast<T*>(0));
if (DataLayout == ColMajor) {
offsets = Eigen::DSizes<ptrdiff_t, 4>(0,2,3,4);
@@ -341,15 +341,15 @@ static void test_slice_raw_data()
}
-template<int DataLayout>
+template<typename T, int DataLayout>
static void test_strided_slice()
{
- typedef Tensor<float, 5, DataLayout> Tensor5f;
+ typedef Tensor<T, 5, DataLayout> Tensor5f;
typedef Eigen::DSizes<Eigen::DenseIndex, 5> Index5;
- typedef Tensor<float, 2, DataLayout> Tensor2f;
+ typedef Tensor<T, 2, DataLayout> Tensor2f;
typedef Eigen::DSizes<Eigen::DenseIndex, 2> Index2;
- Tensor<float, 5, DataLayout> tensor(2,3,5,7,11);
- Tensor<float, 2, DataLayout> tensor2(7,11);
+ Tensor<T, 5, DataLayout> tensor(2,3,5,7,11);
+ Tensor<T, 2, DataLayout> tensor2(7,11);
tensor.setRandom();
tensor2.setRandom();
@@ -435,13 +435,13 @@ static void test_strided_slice()
}
}
-template<int DataLayout>
+template<typename T, int DataLayout>
static void test_strided_slice_write()
{
- typedef Tensor<float, 2, DataLayout> Tensor2f;
+ typedef Tensor<T, 2, DataLayout> Tensor2f;
typedef Eigen::DSizes<Eigen::DenseIndex, 2> Index2;
- Tensor<float, 2, DataLayout> tensor(7,11),tensor2(7,11);
+ Tensor<T, 2, DataLayout> tensor(7,11),tensor2(7,11);
tensor.setRandom();
tensor2=tensor;
Tensor2f slice(2,3);
@@ -461,15 +461,14 @@ static void test_strided_slice_write()
}
}
-
-template<int DataLayout>
+template<typename T, int DataLayout>
static void test_composition()
{
- Eigen::Tensor<float, 2, DataLayout> matrix(7, 11);
+ Eigen::Tensor<T, 2, DataLayout> matrix(7, 11);
matrix.setRandom();
const DSizes<ptrdiff_t, 3> newDims(1, 1, 11);
- Eigen::Tensor<float, 3, DataLayout> tensor =
+ Eigen::Tensor<T, 3, DataLayout> tensor =
matrix.slice(DSizes<ptrdiff_t, 2>(2, 0), DSizes<ptrdiff_t, 2>(1, 11)).reshape(newDims);
VERIFY_IS_EQUAL(tensor.dimensions().TotalSize(), 11);
@@ -481,29 +480,27 @@ static void test_composition()
}
}
+#define CALL_SUBTEST_PART(PART) \
+ CALL_SUBTEST_##PART
+
+#define CALL_SUBTESTS_TYPES_LAYOUTS(PART, NAME) \
+ CALL_SUBTEST_PART(PART)((NAME<float, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, RowMajor>()))
EIGEN_DECLARE_TEST(cxx11_tensor_morphing)
{
CALL_SUBTEST_1(test_simple_reshape<void>());
CALL_SUBTEST_1(test_static_reshape<void>());
- CALL_SUBTEST_1(test_reshape_in_expr<void>());
CALL_SUBTEST_1(test_reshape_as_lvalue<void>());
-
- CALL_SUBTEST_1(test_simple_slice<ColMajor>());
- CALL_SUBTEST_1(test_simple_slice<RowMajor>());
- CALL_SUBTEST_1(test_const_slice());
- CALL_SUBTEST_2(test_slice_in_expr<ColMajor>());
- CALL_SUBTEST_3(test_slice_in_expr<RowMajor>());
- CALL_SUBTEST_4(test_slice_as_lvalue<ColMajor>());
- CALL_SUBTEST_4(test_slice_as_lvalue<RowMajor>());
- CALL_SUBTEST_5(test_slice_raw_data<ColMajor>());
- CALL_SUBTEST_5(test_slice_raw_data<RowMajor>());
-
- CALL_SUBTEST_6(test_strided_slice_write<ColMajor>());
- CALL_SUBTEST_6(test_strided_slice<ColMajor>());
- CALL_SUBTEST_6(test_strided_slice_write<RowMajor>());
- CALL_SUBTEST_6(test_strided_slice<RowMajor>());
-
- CALL_SUBTEST_7(test_composition<ColMajor>());
- CALL_SUBTEST_7(test_composition<RowMajor>());
+ CALL_SUBTEST_1(test_reshape_in_expr<void>());
+ CALL_SUBTEST_1(test_const_slice<float>());
+
+ CALL_SUBTESTS_TYPES_LAYOUTS(2, test_simple_slice);
+ CALL_SUBTESTS_TYPES_LAYOUTS(3, test_slice_as_lvalue);
+ CALL_SUBTESTS_TYPES_LAYOUTS(4, test_slice_raw_data);
+ CALL_SUBTESTS_TYPES_LAYOUTS(5, test_strided_slice_write);
+ CALL_SUBTESTS_TYPES_LAYOUTS(6, test_strided_slice);
+ CALL_SUBTESTS_TYPES_LAYOUTS(7, test_composition);
}
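Finally, a minimal usage sketch of the slicing path these templated tests now cover for bool; with the TensorMorphing.h workaround, a Tensor<bool> slice takes the non-block evaluator (illustrative code, not part of the commit):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<bool, 2> t(4, 4);
  t.setConstant(true);
  Eigen::DSizes<ptrdiff_t, 2> offsets(1, 1);
  Eigen::DSizes<ptrdiff_t, 2> extents(2, 2);
  // With this patch, the bool slice avoids the buggy block-access path.
  Eigen::Tensor<bool, 2> s = t.slice(offsets, extents);
  std::cout << "s(0,0) = " << s(0, 0) << "\n";  // 1
}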