about summary refs log tree commit diff homepage
path: root/test
diff options
context:
space:
mode:
authorGravatar Deven Desai <deven.desai.amd@gmail.com>2018-07-11 09:17:33 -0400
committerGravatar Deven Desai <deven.desai.amd@gmail.com>2018-07-11 09:17:33 -0400
commit38807a257500cd0746b819c994efab805b8a02e4 (patch)
tree0be837e16ad1dc2b09d8f2be2f752f074b169717 /test
parente2b2c61533cb923ddba41ba7bd64b87f30a25e29 (diff)
parentf00d08cc0a987fa624209b920608b56638404f13 (diff)
merging updates from upstream
Diffstat (limited to 'test')
-rw-r--r-- test/block.cpp              |  2
-rw-r--r-- test/cuda_basic.cu          | 39
-rw-r--r-- test/diagonalmatrices.cpp   | 32
-rw-r--r-- test/half_float.cpp         | 30
-rw-r--r-- test/is_same_dense.cpp      |  8
-rw-r--r-- test/packetmath.cpp         | 18
-rw-r--r-- test/product.h              | 11
-rw-r--r-- test/product_large.cpp      |  2
-rw-r--r-- test/product_notemporary.cpp| 10
9 files changed, 136 insertions, 16 deletions
diff --git a/test/block.cpp b/test/block.cpp
index 8c4dd87be..9c2424662 100644
--- a/test/block.cpp
+++ b/test/block.cpp
@@ -162,9 +162,11 @@ template<typename MatrixType> void block(const MatrixType& m)
// expressions without direct access
VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,rows-r1,cols-c1).block(r2-r1,c2-c1,rows-r2,cols-c2)) , ((m1+m2).block(r2,c2,rows-r2,cols-c2)) );
VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).row(0)) , ((m1+m2).row(r1).segment(c1,c2-c1+1)) );
+ VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).row(0)) , ((m1+m2).eval().row(r1).segment(c1,c2-c1+1)) );
VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).col(0)) , ((m1+m2).col(c1).segment(r1,r2-r1+1)) );
VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).transpose().col(0)) , ((m1+m2).row(r1).segment(c1,c2-c1+1)).transpose() );
VERIFY_IS_APPROX( ((m1+m2).transpose().block(c1,r1,c2-c1+1,r2-r1+1).col(0)) , ((m1+m2).row(r1).segment(c1,c2-c1+1)).transpose() );
+ VERIFY_IS_APPROX( ((m1+m2).template block<1,Dynamic>(r1,c1,1,c2-c1+1)) , ((m1+m2).eval().row(r1).segment(c1,c2-c1+1)) );
VERIFY_IS_APPROX( (m1*1).topRows(r1), m1.topRows(r1) );
VERIFY_IS_APPROX( (m1*1).leftCols(c1), m1.leftCols(c1) );
diff --git a/test/cuda_basic.cu b/test/cuda_basic.cu
index ce66c2c78..33e5fd119 100644
--- a/test/cuda_basic.cu
+++ b/test/cuda_basic.cu
@@ -121,7 +121,7 @@ struct diagonal {
};
template<typename T>
-struct eigenvalues {
+struct eigenvalues_direct {
EIGEN_DEVICE_FUNC
void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
{
@@ -136,6 +136,34 @@ struct eigenvalues {
}
};
+template<typename T>
+struct eigenvalues {
+ EIGEN_DEVICE_FUNC
+ void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
+ {
+ using namespace Eigen;
+ typedef Matrix<typename T::Scalar, T::RowsAtCompileTime, 1> Vec;
+ T M(in+i);
+ Map<Vec> res(out+i*Vec::MaxSizeAtCompileTime);
+ T A = M*M.adjoint();
+ SelfAdjointEigenSolver<T> eig;
+ eig.compute(M);
+ res = eig.eigenvalues();
+ }
+};
+
+template<typename T>
+struct matrix_inverse {
+ EIGEN_DEVICE_FUNC
+ void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
+ {
+ using namespace Eigen;
+ T M(in+i);
+ Map<T> res(out+i*T::MaxSizeAtCompileTime);
+ res = M.inverse();
+ }
+};
+
void test_cuda_basic()
{
ei_test_init_cuda();
@@ -163,8 +191,13 @@ void test_cuda_basic()
CALL_SUBTEST( run_and_compare_to_cuda(diagonal<Matrix3f,Vector3f>(), nthreads, in, out) );
CALL_SUBTEST( run_and_compare_to_cuda(diagonal<Matrix4f,Vector4f>(), nthreads, in, out) );
+
+ CALL_SUBTEST( run_and_compare_to_cuda(matrix_inverse<Matrix2f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_cuda(matrix_inverse<Matrix3f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_cuda(matrix_inverse<Matrix4f>(), nthreads, in, out) );
- CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues<Matrix3f>(), nthreads, in, out) );
- CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues<Matrix2f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues_direct<Matrix3f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues_direct<Matrix2f>(), nthreads, in, out) );
+ CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues<Matrix4f>(), nthreads, in, out) );
}
diff --git a/test/diagonalmatrices.cpp b/test/diagonalmatrices.cpp
index a2092c43e..a4ff10239 100644
--- a/test/diagonalmatrices.cpp
+++ b/test/diagonalmatrices.cpp
@@ -30,6 +30,7 @@ template<typename MatrixType> void diagonalmatrices(const MatrixType& m)
v2 = VectorType::Random(rows);
RowVectorType rv1 = RowVectorType::Random(cols),
rv2 = RowVectorType::Random(cols);
+
LeftDiagonalMatrix ldm1(v1), ldm2(v2);
RightDiagonalMatrix rdm1(rv1), rdm2(rv2);
@@ -107,6 +108,32 @@ template<typename MatrixType> void diagonalmatrices(const MatrixType& m)
VERIFY_IS_APPROX( (sq_m1*v1.asDiagonal()).row(i), sq_m2.row(i) );
}
+template<typename MatrixType> void as_scalar_product(const MatrixType& m)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+ typedef Matrix<Scalar, Dynamic, Dynamic> DynMatrixType;
+ typedef Matrix<Scalar, Dynamic, 1> DynVectorType;
+ typedef Matrix<Scalar, 1, Dynamic> DynRowVectorType;
+
+ Index rows = m.rows();
+ Index depth = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE);
+
+ VectorType v1 = VectorType::Random(rows);
+ DynVectorType dv1 = DynVectorType::Random(depth);
+ DynRowVectorType drv1 = DynRowVectorType::Random(depth);
+ DynMatrixType dm1 = dv1;
+ DynMatrixType drm1 = drv1;
+
+ Scalar s = v1(0);
+
+ VERIFY_IS_APPROX( v1.asDiagonal() * drv1, s*drv1 );
+ VERIFY_IS_APPROX( dv1 * v1.asDiagonal(), dv1*s );
+
+ VERIFY_IS_APPROX( v1.asDiagonal() * drm1, s*drm1 );
+ VERIFY_IS_APPROX( dm1 * v1.asDiagonal(), dm1*s );
+}
+
template<int>
void bug987()
{
@@ -122,14 +149,19 @@ void test_diagonalmatrices()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( diagonalmatrices(Matrix<float, 1, 1>()) );
+ CALL_SUBTEST_1( as_scalar_product(Matrix<float, 1, 1>()) );
+
CALL_SUBTEST_2( diagonalmatrices(Matrix3f()) );
CALL_SUBTEST_3( diagonalmatrices(Matrix<double,3,3,RowMajor>()) );
CALL_SUBTEST_4( diagonalmatrices(Matrix4d()) );
CALL_SUBTEST_5( diagonalmatrices(Matrix<float,4,4,RowMajor>()) );
CALL_SUBTEST_6( diagonalmatrices(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+ CALL_SUBTEST_6( as_scalar_product(MatrixXcf(1,1)) );
CALL_SUBTEST_7( diagonalmatrices(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_8( diagonalmatrices(Matrix<double,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_9( diagonalmatrices(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+ CALL_SUBTEST_9( diagonalmatrices(MatrixXf(1,1)) );
+ CALL_SUBTEST_9( as_scalar_product(MatrixXf(1,1)) );
}
CALL_SUBTEST_10( bug987<0>() );
}
diff --git a/test/half_float.cpp b/test/half_float.cpp
index 1b0ea9482..5a881680a 100644
--- a/test/half_float.cpp
+++ b/test/half_float.cpp
@@ -257,13 +257,31 @@ void test_array()
ss << a1;
}
+void test_product()
+{
+ typedef Matrix<half,Dynamic,Dynamic> MatrixXh;
+ Index rows = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE);
+ Index cols = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE);
+ Index depth = internal::random<Index>(1,EIGEN_TEST_MAX_SIZE);
+ MatrixXh Ah = MatrixXh::Random(rows,depth);
+ MatrixXh Bh = MatrixXh::Random(depth,cols);
+ MatrixXh Ch = MatrixXh::Random(rows,cols);
+ MatrixXf Af = Ah.cast<float>();
+ MatrixXf Bf = Bh.cast<float>();
+ MatrixXf Cf = Ch.cast<float>();
+ VERIFY_IS_APPROX(Ch.noalias()+=Ah*Bh, (Cf.noalias()+=Af*Bf).cast<half>());
+}
+
void test_half_float()
{
- CALL_SUBTEST(test_conversion());
CALL_SUBTEST(test_numtraits());
- CALL_SUBTEST(test_arithmetic());
- CALL_SUBTEST(test_comparison());
- CALL_SUBTEST(test_basic_functions());
- CALL_SUBTEST(test_trigonometric_functions());
- CALL_SUBTEST(test_array());
+ for(int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST(test_conversion());
+ CALL_SUBTEST(test_arithmetic());
+ CALL_SUBTEST(test_comparison());
+ CALL_SUBTEST(test_basic_functions());
+ CALL_SUBTEST(test_trigonometric_functions());
+ CALL_SUBTEST(test_array());
+ CALL_SUBTEST(test_product());
+ }
}
diff --git a/test/is_same_dense.cpp b/test/is_same_dense.cpp
index 2c7838ce9..c4e2fbc92 100644
--- a/test/is_same_dense.cpp
+++ b/test/is_same_dense.cpp
@@ -14,9 +14,13 @@ using internal::is_same_dense;
void test_is_same_dense()
{
typedef Matrix<double,Dynamic,Dynamic,ColMajor> ColMatrixXd;
+ typedef Matrix<std::complex<double>,Dynamic,Dynamic,ColMajor> ColMatrixXcd;
ColMatrixXd m1(10,10);
+ ColMatrixXcd m2(10,10);
Ref<ColMatrixXd> ref_m1(m1);
+ Ref<ColMatrixXd,0, Stride<Dynamic,Dynamic> > ref_m2_real(m2.real());
Ref<const ColMatrixXd> const_ref_m1(m1);
+
VERIFY(is_same_dense(m1,m1));
VERIFY(is_same_dense(m1,ref_m1));
VERIFY(is_same_dense(const_ref_m1,m1));
@@ -30,4 +34,8 @@ void test_is_same_dense()
Ref<const ColMatrixXd> const_ref_m1_col(m1.col(1));
VERIFY(is_same_dense(m1.col(1),const_ref_m1_col));
+
+
+ VERIFY(!is_same_dense(m1, ref_m2_real));
+ VERIFY(!is_same_dense(m2, ref_m2_real));
}
diff --git a/test/packetmath.cpp b/test/packetmath.cpp
index 4b19be92d..56e017383 100644
--- a/test/packetmath.cpp
+++ b/test/packetmath.cpp
@@ -123,7 +123,7 @@ template<typename Scalar> void packetmath()
EIGEN_ALIGN_MAX Scalar data2[size];
EIGEN_ALIGN_MAX Packet packets[PacketSize*2];
EIGEN_ALIGN_MAX Scalar ref[size];
- RealScalar refvalue = 0;
+ RealScalar refvalue = RealScalar(0);
for (int i=0; i<size; ++i)
{
data1[i] = internal::random<Scalar>()/RealScalar(PacketSize);
@@ -171,14 +171,18 @@ template<typename Scalar> void packetmath()
for (int i=0; i<PacketSize; ++i)
ref[i] = data1[i+offset];
+ // palign is not used anymore, so let's just put a warning if it fails
+ ++g_test_level;
VERIFY(areApprox(ref, data2, PacketSize) && "internal::palign");
+ --g_test_level;
}
VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasAdd);
VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasSub);
VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasMul);
VERIFY((!PacketTraits::Vectorizable) || PacketTraits::HasNegate);
- VERIFY((internal::is_same<Scalar,int>::value) || (!PacketTraits::Vectorizable) || PacketTraits::HasDiv);
+ // Disabled as it is not clear why it would be mandatory to support division.
+ //VERIFY((internal::is_same<Scalar,int>::value) || (!PacketTraits::Vectorizable) || PacketTraits::HasDiv);
CHECK_CWISE2_IF(PacketTraits::HasAdd, REF_ADD, internal::padd);
CHECK_CWISE2_IF(PacketTraits::HasSub, REF_SUB, internal::psub);
@@ -242,29 +246,30 @@ template<typename Scalar> void packetmath()
}
}
- ref[0] = 0;
+ ref[0] = Scalar(0);
for (int i=0; i<PacketSize; ++i)
ref[0] += data1[i];
VERIFY(isApproxAbs(ref[0], internal::predux(internal::pload<Packet>(data1)), refvalue) && "internal::predux");
+ if(PacketSize==8 && internal::unpacket_traits<typename internal::unpacket_traits<Packet>::half>::size ==4) // so far, predux_half_dowto4 is only required in such a case
{
int HalfPacketSize = PacketSize>4 ? PacketSize/2 : PacketSize;
for (int i=0; i<HalfPacketSize; ++i)
- ref[i] = 0;
+ ref[i] = Scalar(0);
for (int i=0; i<PacketSize; ++i)
ref[i%HalfPacketSize] += data1[i];
internal::pstore(data2, internal::predux_half_dowto4(internal::pload<Packet>(data1)));
VERIFY(areApprox(ref, data2, HalfPacketSize) && "internal::predux_half_dowto4");
}
- ref[0] = 1;
+ ref[0] = Scalar(1);
for (int i=0; i<PacketSize; ++i)
ref[0] *= data1[i];
VERIFY(internal::isApprox(ref[0], internal::predux_mul(internal::pload<Packet>(data1))) && "internal::predux_mul");
for (int j=0; j<PacketSize; ++j)
{
- ref[j] = 0;
+ ref[j] = Scalar(0);
for (int i=0; i<PacketSize; ++i)
ref[j] += data1[i+j*PacketSize];
packets[j] = internal::pload<Packet>(data1+j*PacketSize);
@@ -630,6 +635,7 @@ void test_packetmath()
CALL_SUBTEST_3( packetmath<int>() );
CALL_SUBTEST_4( packetmath<std::complex<float> >() );
CALL_SUBTEST_5( packetmath<std::complex<double> >() );
+ CALL_SUBTEST_6( packetmath<half>() );
CALL_SUBTEST_1( packetmath_notcomplex<float>() );
CALL_SUBTEST_2( packetmath_notcomplex<double>() );
diff --git a/test/product.h b/test/product.h
index 0425a929e..d26e8063d 100644
--- a/test/product.h
+++ b/test/product.h
@@ -111,6 +111,17 @@ template<typename MatrixType> void product(const MatrixType& m)
vcres.noalias() -= m1.transpose() * v1;
VERIFY_IS_APPROX(vcres, vc2 - m1.transpose() * v1);
+ // test scaled products
+ res = square;
+ res.noalias() = s1 * m1 * m2.transpose();
+ VERIFY_IS_APPROX(res, ((s1*m1).eval() * m2.transpose()));
+ res = square;
+ res.noalias() += s1 * m1 * m2.transpose();
+ VERIFY_IS_APPROX(res, square + ((s1*m1).eval() * m2.transpose()));
+ res = square;
+ res.noalias() -= s1 * m1 * m2.transpose();
+ VERIFY_IS_APPROX(res, square - ((s1*m1).eval() * m2.transpose()));
+
// test d ?= a+b*c rules
res.noalias() = square + m1 * m2.transpose();
VERIFY_IS_APPROX(res, square + m1 * m2.transpose());
diff --git a/test/product_large.cpp b/test/product_large.cpp
index 845cd40ca..14a4f739d 100644
--- a/test/product_large.cpp
+++ b/test/product_large.cpp
@@ -35,6 +35,8 @@ void test_product_large()
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( product(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_2( product(MatrixXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+ CALL_SUBTEST_2( product(MatrixXd(internal::random<int>(1,10), internal::random<int>(1,10))) );
+
CALL_SUBTEST_3( product(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_4( product(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2), internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2))) );
CALL_SUBTEST_5( product(Matrix<float,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
diff --git a/test/product_notemporary.cpp b/test/product_notemporary.cpp
index 30592b79e..062180f42 100644
--- a/test/product_notemporary.cpp
+++ b/test/product_notemporary.cpp
@@ -128,11 +128,19 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m)
VERIFY_EVALUATION_COUNT( cvres.noalias() = (rm3+rm3) * (m1*cv1), 1 );
// Check outer products
+ #ifdef EIGEN_ALLOCA
+ bool temp_via_alloca = m3.rows()*sizeof(Scalar) <= EIGEN_STACK_ALLOCATION_LIMIT;
+ #else
+ bool temp_via_alloca = false;
+ #endif
m3 = cv1 * rv1;
VERIFY_EVALUATION_COUNT( m3.noalias() = cv1 * rv1, 0 );
- VERIFY_EVALUATION_COUNT( m3.noalias() = (cv1+cv1) * (rv1+rv1), 1 );
+ VERIFY_EVALUATION_COUNT( m3.noalias() = (cv1+cv1) * (rv1+rv1), temp_via_alloca ? 0 : 1 );
VERIFY_EVALUATION_COUNT( m3.noalias() = (m1*cv1) * (rv1), 1 );
VERIFY_EVALUATION_COUNT( m3.noalias() += (m1*cv1) * (rv1), 1 );
+ rm3 = cv1 * rv1;
+ VERIFY_EVALUATION_COUNT( rm3.noalias() = cv1 * rv1, 0 );
+ VERIFY_EVALUATION_COUNT( rm3.noalias() = (cv1+cv1) * (rv1+rv1), temp_via_alloca ? 0 : 1 );
VERIFY_EVALUATION_COUNT( rm3.noalias() = (cv1) * (rv1 * m1), 1 );
VERIFY_EVALUATION_COUNT( rm3.noalias() -= (cv1) * (rv1 * m1), 1 );
VERIFY_EVALUATION_COUNT( rm3.noalias() = (m1*cv1) * (rv1 * m1), 2 );