path: root/unsupported/test/cxx11_tensor_block_eval.cpp
author    Rasmus Munk Larsen <rmlarsen@google.com>  2020-04-24 17:29:25 -0700
committer Rasmus Munk Larsen <rmlarsen@google.com>  2020-04-28 16:12:47 +0000
commit    ab773c7e914633ec4a3ee1f7cdea8b168d3bce1a (patch)
tree      5a82a3071e3c5de39e934ce144023c20f62b2de4  /unsupported/test/cxx11_tensor_block_eval.cpp
parent    b47c7779937c1984b7cd2f1d2f8df33d67c396f7 (diff)
Extend support for Packet16b:
* Add ptranspose<*,4> to support matmul, and add a unit test for Matrix<bool> * Matrix<bool>.
* Work around a bug in slicing of Tensor<bool>.
* Add tensor tests.

This speeds up matmul for boolean matrices by about 10x:

name                 old time/op   new time/op   delta
BM_MatMul<bool>/8    267ns  ± 0%   479ns  ± 0%   +79.25%  (p=0.008 n=5+5)
BM_MatMul<bool>/32   6.42µs ± 0%   0.87µs ± 0%   -86.50%  (p=0.008 n=5+5)
BM_MatMul<bool>/64   43.3µs ± 0%   5.9µs  ± 0%   -86.42%  (p=0.008 n=5+5)
BM_MatMul<bool>/128  315µs  ± 0%   44µs   ± 0%   -85.98%  (p=0.008 n=5+5)
BM_MatMul<bool>/256  2.41ms ± 0%   0.34ms ± 0%   -85.68%  (p=0.008 n=5+5)
BM_MatMul<bool>/512  18.8ms ± 0%   2.7ms  ± 0%   -85.53%  (p=0.008 n=5+5)
BM_MatMul<bool>/1k   149ms  ± 0%   22ms   ± 0%   -85.40%  (p=0.008 n=5+5)
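For context, the benchmark measures Eigen's dense matrix product with a bool scalar type. A minimal sketch of that usage, with illustrative sizes and names rather than the actual benchmark source:

#include <Eigen/Dense>
#include <iostream>

int main() {
  using BoolMatrix = Eigen::Matrix<bool, Eigen::Dynamic, Eigen::Dynamic>;

  // Boolean operands; setRandom() fills with random true/false values.
  BoolMatrix a(64, 64), b(64, 64);
  a.setRandom();
  b.setRandom();

  // Matrix product over bool: coefficient multiplication acts as logical
  // AND and accumulation effectively as logical OR; this is the path the
  // Packet16b work vectorizes.
  BoolMatrix c = a * b;

  std::cout << "c(0,0) = " << c(0, 0) << "\n";
}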
Diffstat (limited to 'unsupported/test/cxx11_tensor_block_eval.cpp')
-rw-r--r--  unsupported/test/cxx11_tensor_block_eval.cpp  94
1 file changed, 64 insertions(+), 30 deletions(-)
diff --git a/unsupported/test/cxx11_tensor_block_eval.cpp b/unsupported/test/cxx11_tensor_block_eval.cpp
index 226c495aa..a7a49fa1f 100644
--- a/unsupported/test/cxx11_tensor_block_eval.cpp
+++ b/unsupported/test/cxx11_tensor_block_eval.cpp
@@ -233,7 +233,7 @@ static void test_eval_tensor_binary_expr_block() {
rhs.setRandom();
VerifyBlockEvaluator<T, NumDims, Layout>(
- lhs + rhs, [&dims]() { return RandomBlock<Layout>(dims, 1, 10); });
+ lhs * rhs, [&dims]() { return RandomBlock<Layout>(dims, 1, 10); });
}
template <typename T, int NumDims, int Layout>
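This hunk and the similar ones below replace operator+ with operator* in the verified expressions, plausibly because these tests are now also instantiated for T = bool (via the new *_TYPES macros further down) and the coefficient-wise product remains meaningful there as a logical AND. A minimal standalone sketch of the kind of expression under test, with illustrative shapes:

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<bool, 2> lhs(8, 8), rhs(8, 8);
  lhs.setRandom();
  rhs.setRandom();

  // Coefficient-wise product; for bool scalars this is a logical AND.
  Eigen::Tensor<bool, 2> result = lhs * rhs;
}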
@@ -274,7 +274,7 @@ static void test_eval_tensor_broadcast() {
// Check that desc.destination() memory is not shared between two broadcast
// materializations.
VerifyBlockEvaluator<T, NumDims, Layout>(
- input.broadcast(bcast) + input.square().broadcast(bcast),
+ input.broadcast(bcast) * input.square().broadcast(bcast),
[&bcasted_dims]() { return SkewedInnerBlock<Layout>(bcasted_dims); });
}
@@ -509,7 +509,7 @@ static void test_eval_tensor_reshape_with_bcast() {
DSizes<Index, 2> dims(dim, dim);
VerifyBlockEvaluator<T, 2, Layout>(
- lhs.reshape(reshapeLhs).broadcast(bcastLhs) +
+ lhs.reshape(reshapeLhs).broadcast(bcastLhs) *
rhs.reshape(reshapeRhs).broadcast(bcastRhs),
[dims]() { return SkewedInnerBlock<Layout, 2>(dims); });
}
@@ -529,11 +529,11 @@ static void test_eval_tensor_forced_eval() {
DSizes<Index, 2> dims(dim, dim);
VerifyBlockEvaluator<T, 2, Layout>(
- (lhs.broadcast(bcastLhs) + rhs.broadcast(bcastRhs)).eval().reshape(dims),
+ (lhs.broadcast(bcastLhs) * rhs.broadcast(bcastRhs)).eval().reshape(dims),
[dims]() { return SkewedInnerBlock<Layout, 2>(dims); });
VerifyBlockEvaluator<T, 2, Layout>(
- (lhs.broadcast(bcastLhs) + rhs.broadcast(bcastRhs)).eval().reshape(dims),
+ (lhs.broadcast(bcastLhs) * rhs.broadcast(bcastRhs)).eval().reshape(dims),
[dims]() { return RandomBlock<Layout, 2>(dims, 1, 50); });
}
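The forced-eval hunk above makes the same substitution inside a materialized subexpression. A standalone sketch of that pattern, with fixed illustrative shapes in place of the test's randomized dim:

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<bool, 2> lhs(16, 1), rhs(1, 16);
  lhs.setRandom();
  rhs.setRandom();

  Eigen::array<Eigen::Index, 2> bcastLhs = {1, 16};
  Eigen::array<Eigen::Index, 2> bcastRhs = {16, 1};
  Eigen::DSizes<Eigen::Index, 2> dims(16, 16);

  // .eval() materializes the broadcast product before the reshape applies.
  Eigen::Tensor<bool, 2> out =
      (lhs.broadcast(bcastLhs) * rhs.broadcast(bcastRhs)).eval().reshape(dims);
}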
@@ -755,7 +755,39 @@ static void test_assign_to_tensor_shuffle() {
#define CALL_SUBTEST_PART(PART) \
CALL_SUBTEST_##PART
-#define CALL_SUBTESTS_DIMS_LAYOUTS(PART, NAME) \
+#define CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(PART, NAME) \
+ CALL_SUBTEST_PART(PART)((NAME<float, 1, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 2, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 3, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 4, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 5, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 1, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 2, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 3, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 4, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<float, 5, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 1, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 2, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 3, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 4, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 5, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 1, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 2, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 3, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 4, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<int, 5, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 1, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 2, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 3, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 4, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 5, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 1, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 2, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 3, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 4, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, 5, ColMajor>()))
+
+#define CALL_SUBTESTS_DIMS_LAYOUTS(PART, NAME) \
CALL_SUBTEST_PART(PART)((NAME<float, 1, RowMajor>())); \
CALL_SUBTEST_PART(PART)((NAME<float, 2, RowMajor>())); \
CALL_SUBTEST_PART(PART)((NAME<float, 3, RowMajor>())); \
@@ -767,36 +799,38 @@ static void test_assign_to_tensor_shuffle() {
CALL_SUBTEST_PART(PART)((NAME<float, 4, ColMajor>())); \
CALL_SUBTEST_PART(PART)((NAME<float, 5, ColMajor>()))
-#define CALL_SUBTESTS_LAYOUTS(PART, NAME) \
+#define CALL_SUBTESTS_LAYOUTS_TYPES(PART, NAME) \
CALL_SUBTEST_PART(PART)((NAME<float, RowMajor>())); \
- CALL_SUBTEST_PART(PART)((NAME<float, ColMajor>()))
+ CALL_SUBTEST_PART(PART)((NAME<float, ColMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, RowMajor>())); \
+ CALL_SUBTEST_PART(PART)((NAME<bool, ColMajor>()))
EIGEN_DECLARE_TEST(cxx11_tensor_block_eval) {
// clang-format off
- CALL_SUBTESTS_DIMS_LAYOUTS(1, test_eval_tensor_block);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(1, test_eval_tensor_block);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(1, test_eval_tensor_binary_expr_block);
CALL_SUBTESTS_DIMS_LAYOUTS(1, test_eval_tensor_unary_expr_block);
- CALL_SUBTESTS_DIMS_LAYOUTS(1, test_eval_tensor_binary_expr_block);
CALL_SUBTESTS_DIMS_LAYOUTS(2, test_eval_tensor_binary_with_unary_expr_block);
- CALL_SUBTESTS_DIMS_LAYOUTS(2, test_eval_tensor_broadcast);
- CALL_SUBTESTS_DIMS_LAYOUTS(2, test_eval_tensor_reshape);
- CALL_SUBTESTS_DIMS_LAYOUTS(3, test_eval_tensor_cast);
- CALL_SUBTESTS_DIMS_LAYOUTS(3, test_eval_tensor_select);
- CALL_SUBTESTS_DIMS_LAYOUTS(3, test_eval_tensor_padding);
- CALL_SUBTESTS_DIMS_LAYOUTS(4, test_eval_tensor_chipping);
- CALL_SUBTESTS_DIMS_LAYOUTS(4, test_eval_tensor_generator);
- CALL_SUBTESTS_DIMS_LAYOUTS(4, test_eval_tensor_reverse);
- CALL_SUBTESTS_DIMS_LAYOUTS(5, test_eval_tensor_slice);
- CALL_SUBTESTS_DIMS_LAYOUTS(5, test_eval_tensor_shuffle);
-
- CALL_SUBTESTS_LAYOUTS(6, test_eval_tensor_reshape_with_bcast);
- CALL_SUBTESTS_LAYOUTS(6, test_eval_tensor_forced_eval);
- CALL_SUBTESTS_LAYOUTS(6, test_eval_tensor_chipping_of_bcast);
-
- CALL_SUBTESTS_DIMS_LAYOUTS(7, test_assign_to_tensor);
- CALL_SUBTESTS_DIMS_LAYOUTS(7, test_assign_to_tensor_reshape);
- CALL_SUBTESTS_DIMS_LAYOUTS(7, test_assign_to_tensor_chipping);
- CALL_SUBTESTS_DIMS_LAYOUTS(8, test_assign_to_tensor_slice);
- CALL_SUBTESTS_DIMS_LAYOUTS(8, test_assign_to_tensor_shuffle);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(2, test_eval_tensor_broadcast);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(2, test_eval_tensor_reshape);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(3, test_eval_tensor_cast);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(3, test_eval_tensor_select);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(3, test_eval_tensor_padding);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(4, test_eval_tensor_chipping);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(4, test_eval_tensor_generator);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(4, test_eval_tensor_reverse);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(5, test_eval_tensor_slice);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(5, test_eval_tensor_shuffle);
+
+ CALL_SUBTESTS_LAYOUTS_TYPES(6, test_eval_tensor_reshape_with_bcast);
+ CALL_SUBTESTS_LAYOUTS_TYPES(6, test_eval_tensor_forced_eval);
+ CALL_SUBTESTS_LAYOUTS_TYPES(6, test_eval_tensor_chipping_of_bcast);
+
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(7, test_assign_to_tensor);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(7, test_assign_to_tensor_reshape);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(7, test_assign_to_tensor_chipping);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(8, test_assign_to_tensor_slice);
+ CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(8, test_assign_to_tensor_shuffle);
// Force CMake to split this test.
// EIGEN_SUFFIXES;1;2;3;4;5;6;7;8
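For orientation, each CALL_SUBTESTS_* macro simply fans a test template out over its type/rank/layout instantiations, and the CALL_SUBTEST_##PART wrapper ties each call to one of the numbered subtests in the EIGEN_SUFFIXES comment so CMake can split the binary. A minimal, self-contained sketch of what one two-parameter invocation expands to, using a hypothetical stub in place of the real test:

#include <iostream>

// Hypothetical stub standing in for one of the test templates above.
template <typename T, int Layout>
static void test_eval_tensor_forced_eval() {
  std::cout << "instantiated with Layout=" << Layout << "\n";
}

int main() {
  // Approximate hand-expansion of
  //   CALL_SUBTESTS_LAYOUTS_TYPES(6, test_eval_tensor_forced_eval);
  // ignoring the CALL_SUBTEST_6 wrapper that registers each call with the
  // test harness. RowMajor/ColMajor use Eigen's StorageOptions values.
  constexpr int RowMajor = 0x1, ColMajor = 0x0;
  test_eval_tensor_forced_eval<float, RowMajor>();
  test_eval_tensor_forced_eval<float, ColMajor>();
  test_eval_tensor_forced_eval<bool, RowMajor>();
  test_eval_tensor_forced_eval<bool, ColMajor>();
}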