author    Benoit Steiner <benoit.steiner.goog@gmail.com>  2015-01-14 15:46:04 -0800
committer Benoit Steiner <benoit.steiner.goog@gmail.com>  2015-01-14 15:46:04 -0800
commit    b5124e7cfda27ed99dcfcec8cb1b674efa1ef4a3 (patch)
tree      7f8378843a756af14785e563689b4765e062a953 /unsupported/test/cxx11_tensor_contraction.cpp
parent    54e3633b437e44ed4d370c9f8868535192308ca3 (diff)
Created many additional tests
Diffstat (limited to 'unsupported/test/cxx11_tensor_contraction.cpp')
-rw-r--r--  unsupported/test/cxx11_tensor_contraction.cpp | 221
1 file changed, 141 insertions(+), 80 deletions(-)
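
Note: the commit templatizes every test in this file on a DataLayout parameter (ColMajor or RowMajor) and registers each one twice in test_cxx11_tensor_contraction(). A minimal standalone sketch of that pattern is shown below; the names test_example_contraction and run_both_layouts are illustrative only and do not appear in the commit, and the entry-by-entry check is abbreviated.

// Minimal sketch of the templatization pattern applied throughout this commit
// (function names are illustrative, not part of the diff below).
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>
#include <cmath>

using Eigen::Tensor;
typedef Tensor<float, 1>::DimensionPair DimPair;

template<int DataLayout>
static void test_example_contraction()
{
  Tensor<float, 2, DataLayout> lhs(2, 3);
  Tensor<float, 2, DataLayout> rhs(3, 2);
  lhs.setRandom();
  rhs.setRandom();

  // Contract dimension 1 of lhs against dimension 0 of rhs,
  // i.e. an ordinary 2x3 * 3x2 matrix product.
  Eigen::array<DimPair, 1> dims({{DimPair(1, 0)}});
  Tensor<float, 2, DataLayout> result = lhs.contract(rhs, dims);

  float expected = lhs(0,0)*rhs(0,0) + lhs(0,1)*rhs(1,0) + lhs(0,2)*rhs(2,0);
  assert(std::abs(result(0, 0) - expected) < 1e-4f);
  // The real tests check every entry with VERIFY_IS_APPROX.
}

static void run_both_layouts()
{
  test_example_contraction<Eigen::ColMajor>();
  test_example_contraction<Eigen::RowMajor>();
}
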
diff --git a/unsupported/test/cxx11_tensor_contraction.cpp b/unsupported/test/cxx11_tensor_contraction.cpp
index 17bd335f7..6124818fd 100644
--- a/unsupported/test/cxx11_tensor_contraction.cpp
+++ b/unsupported/test/cxx11_tensor_contraction.cpp
@@ -16,18 +16,18 @@ using Eigen::Tensor;
typedef Tensor<float, 1>::DimensionPair DimPair;
-
+template<int DataLayout>
static void test_evals()
{
- Tensor<float, 2> mat1(2, 3);
- Tensor<float, 2> mat2(2, 3);
- Tensor<float, 2> mat3(3, 2);
+ Tensor<float, 2, DataLayout> mat1(2, 3);
+ Tensor<float, 2, DataLayout> mat2(2, 3);
+ Tensor<float, 2, DataLayout> mat3(3, 2);
mat1.setRandom();
mat2.setRandom();
mat3.setRandom();
- Tensor<float, 2> mat4(3,3);
+ Tensor<float, 2, DataLayout> mat4(3,3);
mat4.setZero();
Eigen::array<DimPair, 1> dims3({{DimPair(0, 0)}});
typedef TensorEvaluator<decltype(mat1.contract(mat2, dims3)), DefaultDevice> Evaluator;
@@ -47,7 +47,7 @@ static void test_evals()
VERIFY_IS_APPROX(mat4(2,1), mat1(0,2)*mat2(0,1) + mat1(1,2)*mat2(1,1));
VERIFY_IS_APPROX(mat4(2,2), mat1(0,2)*mat2(0,2) + mat1(1,2)*mat2(1,2));
- Tensor<float, 2> mat5(2,2);
+ Tensor<float, 2, DataLayout> mat5(2,2);
mat5.setZero();
Eigen::array<DimPair, 1> dims4({{DimPair(1, 1)}});
typedef TensorEvaluator<decltype(mat1.contract(mat2, dims4)), DefaultDevice> Evaluator2;
@@ -62,7 +62,7 @@ static void test_evals()
VERIFY_IS_APPROX(mat5(1,0), mat1(1,0)*mat2(0,0) + mat1(1,1)*mat2(0,1) + mat1(1,2)*mat2(0,2));
VERIFY_IS_APPROX(mat5(1,1), mat1(1,0)*mat2(1,0) + mat1(1,1)*mat2(1,1) + mat1(1,2)*mat2(1,2));
- Tensor<float, 2> mat6(2,2);
+ Tensor<float, 2, DataLayout> mat6(2,2);
mat6.setZero();
Eigen::array<DimPair, 1> dims6({{DimPair(1, 0)}});
typedef TensorEvaluator<decltype(mat1.contract(mat3, dims6)), DefaultDevice> Evaluator3;
@@ -78,16 +78,16 @@ static void test_evals()
VERIFY_IS_APPROX(mat6(1,1), mat1(1,0)*mat3(0,1) + mat1(1,1)*mat3(1,1) + mat1(1,2)*mat3(2,1));
}
-
+template<int DataLayout>
static void test_scalar()
{
- Tensor<float, 1> vec1({6});
- Tensor<float, 1> vec2({6});
+ Tensor<float, 1, DataLayout> vec1({6});
+ Tensor<float, 1, DataLayout> vec2({6});
vec1.setRandom();
vec2.setRandom();
- Tensor<float, 1> scalar(1);
+ Tensor<float, 1, DataLayout> scalar(1);
scalar.setZero();
Eigen::array<DimPair, 1> dims({{DimPair(0, 0)}});
typedef TensorEvaluator<decltype(vec1.contract(vec2, dims)), DefaultDevice> Evaluator;
@@ -102,16 +102,16 @@ static void test_scalar()
VERIFY_IS_APPROX(scalar(0), expected);
}
-
+template<int DataLayout>
static void test_multidims()
{
- Tensor<float, 3> mat1(2, 2, 2);
- Tensor<float, 4> mat2(2, 2, 2, 2);
+ Tensor<float, 3, DataLayout> mat1(2, 2, 2);
+ Tensor<float, 4, DataLayout> mat2(2, 2, 2, 2);
mat1.setRandom();
mat2.setRandom();
- Tensor<float, 3> mat3(2, 2, 2);
+ Tensor<float, 3, DataLayout> mat3(2, 2, 2);
mat3.setZero();
Eigen::array<DimPair, 2> dims({{DimPair(1, 2), DimPair(2, 3)}});
typedef TensorEvaluator<decltype(mat1.contract(mat2, dims)), DefaultDevice> Evaluator;
@@ -140,15 +140,15 @@ static void test_multidims()
mat1(1,0,1)*mat2(1,1,0,1) + mat1(1,1,1)*mat2(1,1,1,1));
}
-
+template<int DataLayout>
static void test_holes() {
- Tensor<float, 4> t1(2, 5, 7, 3);
- Tensor<float, 5> t2(2, 7, 11, 13, 3);
+ Tensor<float, 4, DataLayout> t1(2, 5, 7, 3);
+ Tensor<float, 5, DataLayout> t2(2, 7, 11, 13, 3);
t1.setRandom();
t2.setRandom();
Eigen::array<DimPair, 2> dims({{DimPair(0, 0), DimPair(3, 4)}});
- Tensor<float, 5> result = t1.contract(t2, dims);
+ Tensor<float, 5, DataLayout> result = t1.contract(t2, dims);
VERIFY_IS_EQUAL(result.dimension(0), 5);
VERIFY_IS_EQUAL(result.dimension(1), 7);
VERIFY_IS_EQUAL(result.dimension(2), 7);
@@ -174,16 +174,16 @@ static void test_holes() {
}
}
-
+template<int DataLayout>
static void test_full_redux()
{
- Tensor<float, 2> t1(2, 2);
- Tensor<float, 3> t2(2, 2, 2);
+ Tensor<float, 2, DataLayout> t1(2, 2);
+ Tensor<float, 3, DataLayout> t2(2, 2, 2);
t1.setRandom();
t2.setRandom();
Eigen::array<DimPair, 2> dims({{DimPair(0, 0), DimPair(1, 1)}});
- Tensor<float, 1> result = t1.contract(t2, dims);
+ Tensor<float, 1, DataLayout> result = t1.contract(t2, dims);
VERIFY_IS_EQUAL(result.dimension(0), 2);
VERIFY_IS_APPROX(result(0), t1(0, 0) * t2(0, 0, 0) + t1(1, 0) * t2(1, 0, 0)
+ t1(0, 1) * t2(0, 1, 0) + t1(1, 1) * t2(1, 1, 0));
@@ -200,13 +200,13 @@ static void test_full_redux()
+ t1(0, 1) * t2(1, 0, 1) + t1(1, 1) * t2(1, 1, 1));
}
-
+template<int DataLayout>
static void test_contraction_of_contraction()
{
- Tensor<float, 2> t1(2, 2);
- Tensor<float, 2> t2(2, 2);
- Tensor<float, 2> t3(2, 2);
- Tensor<float, 2> t4(2, 2);
+ Tensor<float, 2, DataLayout> t1(2, 2);
+ Tensor<float, 2, DataLayout> t2(2, 2);
+ Tensor<float, 2, DataLayout> t3(2, 2);
+ Tensor<float, 2, DataLayout> t4(2, 2);
t1.setRandom();
t2.setRandom();
t3.setRandom();
@@ -216,30 +216,32 @@ static void test_contraction_of_contraction()
auto contract1 = t1.contract(t2, dims);
auto diff = t3 - contract1;
auto contract2 = t1.contract(t4, dims);
- Tensor<float, 2> result = contract2.contract(diff, dims);
+ Tensor<float, 2, DataLayout> result = contract2.contract(diff, dims);
+
VERIFY_IS_EQUAL(result.dimension(0), 2);
VERIFY_IS_EQUAL(result.dimension(1), 2);
- Eigen::Map<MatrixXf> m1(t1.data(), 2, 2);
- Eigen::Map<MatrixXf> m2(t2.data(), 2, 2);
- Eigen::Map<MatrixXf> m3(t3.data(), 2, 2);
- Eigen::Map<MatrixXf> m4(t4.data(), 2, 2);
- Eigen::MatrixXf expected = (m1 * m4) * (m3 - m1 * m2);
+ Eigen::Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>>
+ m1(t1.data(), 2, 2), m2(t2.data(), 2, 2), m3(t3.data(), 2, 2),
+ m4(t4.data(), 2, 2);
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>
+ expected = (m1 * m4) * (m3 - m1 * m2);
+
VERIFY_IS_APPROX(result(0, 0), expected(0, 0));
VERIFY_IS_APPROX(result(0, 1), expected(0, 1));
VERIFY_IS_APPROX(result(1, 0), expected(1, 0));
VERIFY_IS_APPROX(result(1, 1), expected(1, 1));
}
-
+template<int DataLayout>
static void test_expr()
{
- Tensor<float, 2> mat1(2, 3);
- Tensor<float, 2> mat2(3, 2);
+ Tensor<float, 2, DataLayout> mat1(2, 3);
+ Tensor<float, 2, DataLayout> mat2(3, 2);
mat1.setRandom();
mat2.setRandom();
- Tensor<float, 2> mat3(2,2);
+ Tensor<float, 2, DataLayout> mat3(2,2);
Eigen::array<DimPair, 1> dims({{DimPair(1, 0)}});
mat3 = mat1.contract(mat2, dims);
@@ -250,16 +252,16 @@ static void test_expr()
VERIFY_IS_APPROX(mat3(1,1), mat1(1,0)*mat2(0,1) + mat1(1,1)*mat2(1,1) + mat1(1,2)*mat2(2,1));
}
-
+template<int DataLayout>
static void test_out_of_order_contraction()
{
- Tensor<float, 3> mat1(2, 2, 2);
- Tensor<float, 3> mat2(2, 2, 2);
+ Tensor<float, 3, DataLayout> mat1(2, 2, 2);
+ Tensor<float, 3, DataLayout> mat2(2, 2, 2);
mat1.setRandom();
mat2.setRandom();
- Tensor<float, 2> mat3(2, 2);
+ Tensor<float, 2, DataLayout> mat3(2, 2);
Eigen::array<DimPair, 2> dims({{DimPair(2, 0), DimPair(0, 2)}});
mat3 = mat1.contract(mat2, dims);
@@ -295,18 +297,18 @@ static void test_out_of_order_contraction()
}
-
+template<int DataLayout>
static void test_consistency()
{
// this does something like testing (A*B)^T = (B^T * A^T)
- Tensor<float, 3> mat1(4, 3, 5);
- Tensor<float, 5> mat2(3, 2, 1, 5, 4);
+ Tensor<float, 3, DataLayout> mat1(4, 3, 5);
+ Tensor<float, 5, DataLayout> mat2(3, 2, 1, 5, 4);
mat1.setRandom();
mat2.setRandom();
- Tensor<float, 4> mat3(5, 2, 1, 5);
- Tensor<float, 4> mat4(2, 1, 5, 5);
+ Tensor<float, 4, DataLayout> mat3(5, 2, 1, 5);
+ Tensor<float, 4, DataLayout> mat4(2, 1, 5, 5);
// contract on dimensions of size 4 and 3
Eigen::array<DimPair, 2> dims1({{DimPair(0, 4), DimPair(1, 0)}});
@@ -316,27 +318,40 @@ static void test_consistency()
mat4 = mat2.contract(mat1, dims2);
// check that these are equal except for ordering of dimensions
- for (size_t i = 0; i < 5; i++) {
- for (size_t j = 0; j < 10; j++) {
- VERIFY_IS_APPROX(mat3.data()[i + 5 * j], mat4.data()[j + 10 * i]);
+ if (DataLayout == ColMajor) {
+ for (size_t i = 0; i < 5; i++) {
+ for (size_t j = 0; j < 10; j++) {
+ VERIFY_IS_APPROX(mat3.data()[i + 5 * j], mat4.data()[j + 10 * i]);
+ }
+ }
+ } else {
+ // Row major
+ for (size_t i = 0; i < 5; i++) {
+ for (size_t j = 0; j < 10; j++) {
+ VERIFY_IS_APPROX(mat3.data()[10 * i + j], mat4.data()[i + 5 * j]);
+ }
}
}
}
-
+template<int DataLayout>
static void test_large_contraction()
{
- Tensor<float, 4> t_left(30, 50, 8, 31);
- Tensor<float, 5> t_right(8, 31, 7, 20, 10);
- Tensor<float, 5> t_result(30, 50, 7, 20, 10);
+ Tensor<float, 4, DataLayout> t_left(30, 50, 8, 31);
+ Tensor<float, 5, DataLayout> t_right(8, 31, 7, 20, 10);
+ Tensor<float, 5, DataLayout> t_result(30, 50, 7, 20, 10);
t_left.setRandom();
t_right.setRandom();
- typedef Map<MatrixXf> MapXf;
+ // Add a little offset so that the results won't be close to zero.
+ t_left += t_left.constant(1.0f);
+ t_right += t_right.constant(1.0f);
+
+ typedef Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> MapXf;
MapXf m_left(t_left.data(), 1500, 248);
MapXf m_right(t_right.data(), 248, 1400);
- MatrixXf m_result(1500, 1400);
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(1500, 1400);
// this contraction should be equivalent to a single matrix multiplication
Eigen::array<DimPair, 2> dims({{DimPair(2, 0), DimPair(3, 1)}});
@@ -351,20 +366,20 @@ static void test_large_contraction()
}
}
-
+template<int DataLayout>
static void test_matrix_vector()
{
- Tensor<float, 2> t_left(30, 50);
- Tensor<float, 1> t_right(50);
- Tensor<float, 1> t_result(30);
+ Tensor<float, 2, DataLayout> t_left(30, 50);
+ Tensor<float, 1, DataLayout> t_right(50);
+ Tensor<float, 1, DataLayout> t_result(30);
t_left.setRandom();
t_right.setRandom();
- typedef Map<Eigen::Matrix<float, Dynamic, Dynamic>> MapXf;
+ typedef Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> MapXf;
MapXf m_left(t_left.data(), 30, 50);
MapXf m_right(t_right.data(), 50, 1);
- Eigen::Matrix<float, Dynamic, Dynamic> m_result(30, 1);
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(30, 1);
// this contraction should be equivalent to a single matrix multiplication
Eigen::array<DimPair, 1> dims{{DimPair(1, 0)}};
@@ -379,18 +394,19 @@ static void test_matrix_vector()
}
+template<int DataLayout>
static void test_tensor_vector()
{
- Tensor<float, 3> t_left(7, 13, 17);
- Tensor<float, 2> t_right(1, 7);
- typedef typename Tensor<float, 1>::DimensionPair DimensionPair;
+ Tensor<float, 3, DataLayout> t_left(7, 13, 17);
+ Tensor<float, 2, DataLayout> t_right(1, 7);
+ typedef typename Tensor<float, 1, DataLayout>::DimensionPair DimensionPair;
Eigen::array<DimensionPair, 1> dim_pair01{{{0, 1}}};
- Tensor<float, 3> t_result = t_left.contract(t_right, dim_pair01);
+ Tensor<float, 3, DataLayout> t_result = t_left.contract(t_right, dim_pair01);
- typedef Map<Eigen::Matrix<float, Dynamic, Dynamic>> MapXf;
+ typedef Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> MapXf;
MapXf m_left(t_left.data(), 7, 13*17);
MapXf m_right(t_right.data(), 1, 7);
- Eigen::Matrix<float, Dynamic, Dynamic> m_result = m_left.transpose() * m_right.transpose();
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result = m_left.transpose() * m_right.transpose();
for (size_t i = 0; i < t_result.dimensions().TotalSize(); i++) {
VERIFY_IS_APPROX(t_result(i), m_result(i, 0));
@@ -398,18 +414,63 @@ static void test_tensor_vector()
}
+template<int DataLayout>
+static void test_small_blocking_factors()
+{
+ Tensor<float, 4, DataLayout> t_left(30, 5, 3, 31);
+ Tensor<float, 5, DataLayout> t_right(3, 31, 7, 20, 1);
+ t_left.setRandom();
+ t_right.setRandom();
+
+ // Add a little offset so that the results won't be close to zero.
+ t_left += t_left.constant(1.0f);
+ t_right += t_right.constant(1.0f);
+
+ // Force the cache sizes, which results in smaller blocking factors.
+ Eigen::setCpuCacheSizes(896, 1920, 2944);
+
+ // this contraction should be equivalent to a single matrix multiplication
+ Eigen::array<DimPair, 2> dims({{DimPair(2, 0), DimPair(3, 1)}});
+ Tensor<float, 5, DataLayout> t_result;
+ t_result = t_left.contract(t_right, dims);
+
+ // compute result using a simple eigen matrix product
+ Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> m_left(t_left.data(), 150, 93);
+ Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> m_right(t_right.data(), 93, 140);
+ Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result = m_left * m_right;
+
+ for (size_t i = 0; i < t_result.dimensions().TotalSize(); i++) {
+ VERIFY_IS_APPROX(t_result.data()[i], m_result.data()[i]);
+ }
+}
+
+
void test_cxx11_tensor_contraction()
{
- CALL_SUBTEST(test_evals());
- CALL_SUBTEST(test_scalar());
- CALL_SUBTEST(test_multidims());
- CALL_SUBTEST(test_holes());
- CALL_SUBTEST(test_full_redux());
- CALL_SUBTEST(test_contraction_of_contraction());
- CALL_SUBTEST(test_expr());
- CALL_SUBTEST(test_out_of_order_contraction());
- CALL_SUBTEST(test_consistency());
- CALL_SUBTEST(test_large_contraction());
- CALL_SUBTEST(test_matrix_vector());
- CALL_SUBTEST(test_tensor_vector());
+ CALL_SUBTEST(test_evals<ColMajor>());
+ CALL_SUBTEST(test_evals<RowMajor>());
+ CALL_SUBTEST(test_scalar<ColMajor>());
+ CALL_SUBTEST(test_scalar<RowMajor>());
+ CALL_SUBTEST(test_multidims<ColMajor>());
+ CALL_SUBTEST(test_multidims<RowMajor>());
+ CALL_SUBTEST(test_holes<ColMajor>());
+ CALL_SUBTEST(test_holes<RowMajor>());
+ CALL_SUBTEST(test_full_redux<ColMajor>());
+ CALL_SUBTEST(test_full_redux<RowMajor>());
+ CALL_SUBTEST(test_contraction_of_contraction<ColMajor>());
+ CALL_SUBTEST(test_contraction_of_contraction<RowMajor>());
+ CALL_SUBTEST(test_expr<ColMajor>());
+ CALL_SUBTEST(test_expr<RowMajor>());
+ CALL_SUBTEST(test_out_of_order_contraction<ColMajor>());
+ CALL_SUBTEST(test_out_of_order_contraction<RowMajor>());
+ CALL_SUBTEST(test_consistency<ColMajor>());
+ CALL_SUBTEST(test_consistency<RowMajor>());
+ CALL_SUBTEST(test_large_contraction<ColMajor>());
+ CALL_SUBTEST(test_large_contraction<RowMajor>());
+ CALL_SUBTEST(test_matrix_vector<ColMajor>());
+ CALL_SUBTEST(test_matrix_vector<RowMajor>());
+ CALL_SUBTEST(test_tensor_vector<ColMajor>());
+ CALL_SUBTEST(test_tensor_vector<RowMajor>());
+ CALL_SUBTEST(test_small_blocking_factors<ColMajor>());
+ CALL_SUBTEST(test_small_blocking_factors<RowMajor>());
}
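
For reference, the layout-specific checks in test_consistency above follow directly from how dense storage linearizes a multi-index: for a 5x10 result, column-major storage places entry (i, j) at offset i + 5*j, while row-major storage places it at 10*i + j. A small standalone sketch of that arithmetic (not part of the commit):

// Column-major vs row-major linear offsets for a 5 x 10 array,
// matching the data() indexing used in test_consistency.
#include <cassert>
#include <cstddef>

int main()
{
  const std::size_t rows = 5, cols = 10;
  for (std::size_t i = 0; i < rows; i++) {
    for (std::size_t j = 0; j < cols; j++) {
      const std::size_t col_major = i + rows * j;  // i + 5 * j
      const std::size_t row_major = cols * i + j;  // 10 * i + j
      assert(col_major < rows * cols);
      assert(row_major < rows * cols);
    }
  }
  return 0;
}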