path: root/test
author    Gael Guennebaud <g.gael@free.fr>    2016-06-23 15:29:57 +0200
committer Gael Guennebaud <g.gael@free.fr>    2016-06-23 15:29:57 +0200
commit    7c6561485a64255c0d316bdaa8e0c460c1ecf851 (patch)
tree      acef5cfd70d36b7708535252e0ea91037e5e78f7 /test
parent    a3f7edf7e7672094190e04a0b4417de1abfa3de5 (diff)
parent    76faf4a9657efeed089aeedc98a769410c32d3d7 (diff)
merge PR 194
Diffstat (limited to 'test')
-rw-r--r--   test/array.cpp                 4
-rw-r--r--   test/array_for_matrix.cpp      2
-rw-r--r--   test/evaluators.cpp           14
-rw-r--r--   test/geo_alignedbox.cpp        2
-rw-r--r--   test/linearstructure.cpp      18
-rw-r--r--   test/mixingtypes.cpp          96
-rw-r--r--   test/nesting_ops.cpp           4
-rw-r--r--   test/vectorization_logic.cpp   7
8 files changed, 118 insertions, 29 deletions
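The common thread of this merge: Eigen's scalar binary functors and assignment functors are now templated on both operand scalar types, so mixed real/complex expressions can be written directly instead of casting one operand first. A minimal standalone sketch of the kind of expressions the updated tests exercise (illustrative only, not part of this patch; assumes a post-merge Eigen):

    #include <Eigen/Dense>
    #include <complex>

    int main()
    {
      Eigen::MatrixXd  md  = Eigen::MatrixXd::Random(3,3);
      Eigen::MatrixXcd mcd = Eigen::MatrixXcd::Random(3,3);

      // Mixed real/complex expressions accepted without an explicit
      // cast<std::complex<double> >() once both scalar types are propagated:
      Eigen::MatrixXcd r1 = 2.0 * mcd;                 // real scalar * complex matrix
      Eigen::ArrayXXcd r2 = mcd.array() + md.array();  // complex array + real array
      Eigen::MatrixXcd r3 = mcd;
      r3 += md;                                        // mixed compound assignment
      return 0;
    }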
diff --git a/test/array.cpp b/test/array.cpp
index 4cd4f262b..0416ec5d2 100644
--- a/test/array.cpp
+++ b/test/array.cpp
@@ -72,7 +72,7 @@ template<typename ArrayType> void array(const ArrayType& m)
VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.rowwise().sum().sum() - m1.sum()), m1.abs().sum());
if (!internal::isMuchSmallerThan(abs(m1.sum() - (m1+m2).sum()), m1.abs().sum(), test_precision<Scalar>()))
VERIFY_IS_NOT_APPROX(((m1+m2).rowwise().sum()).sum(), m1.sum());
- VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op<Scalar>()));
+ VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op<Scalar,Scalar>()));
// vector-wise ops
m3 = m1;
@@ -807,7 +807,7 @@ void test_array()
VERIFY((internal::is_same< internal::global_math_functions_filtering_base<int>::type, int >::value));
VERIFY((internal::is_same< internal::global_math_functions_filtering_base<float>::type, float >::value));
VERIFY((internal::is_same< internal::global_math_functions_filtering_base<Array2i>::type, ArrayBase<Array2i> >::value));
- typedef CwiseUnaryOp<internal::scalar_multiple_op<double>, ArrayXd > Xpr;
+ typedef CwiseUnaryOp<internal::scalar_abs_op<double>, ArrayXd > Xpr;
VERIFY((internal::is_same< internal::global_math_functions_filtering_base<Xpr>::type,
ArrayBase<Xpr>
>::value));
diff --git a/test/array_for_matrix.cpp b/test/array_for_matrix.cpp
index 75e6a778f..97e03be83 100644
--- a/test/array_for_matrix.cpp
+++ b/test/array_for_matrix.cpp
@@ -45,7 +45,7 @@ template<typename MatrixType> void array_for_matrix(const MatrixType& m)
VERIFY_IS_MUCH_SMALLER_THAN(m1.rowwise().sum().sum() - m1.sum(), m1.squaredNorm());
VERIFY_IS_MUCH_SMALLER_THAN(m1.colwise().sum() + m2.colwise().sum() - (m1+m2).colwise().sum(), (m1+m2).squaredNorm());
VERIFY_IS_MUCH_SMALLER_THAN(m1.rowwise().sum() - m2.rowwise().sum() - (m1-m2).rowwise().sum(), (m1-m2).squaredNorm());
- VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op<Scalar>()));
+ VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op<Scalar,Scalar>()));
// vector-wise ops
m3 = m1;
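In both array.cpp and array_for_matrix.cpp the only change is that internal::scalar_sum_op now takes the two operand scalar types. A minimal sketch of the updated redux call (illustrative, assumes a post-merge Eigen):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXXf m1 = Eigen::ArrayXXf::Random(4,4);
      // Column-wise sum written through the generic redux with the two-parameter
      // sum functor; it must match colwise().sum(), as VERIFY_IS_APPROX checks above.
      Eigen::Array<float,1,Eigen::Dynamic> s1 = m1.colwise().sum();
      Eigen::Array<float,1,Eigen::Dynamic> s2 =
          m1.colwise().redux(Eigen::internal::scalar_sum_op<float,float>());
      std::cout << (s1 - s2).abs().maxCoeff() << std::endl;  // ~0
      return 0;
    }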
diff --git a/test/evaluators.cpp b/test/evaluators.cpp
index 876dffe22..aed5a05a7 100644
--- a/test/evaluators.cpp
+++ b/test/evaluators.cpp
@@ -21,7 +21,7 @@ namespace Eigen {
EIGEN_STRONG_INLINE
DstXprType& copy_using_evaluator(const EigenBase<DstXprType> &dst, const SrcXprType &src)
{
- call_assignment(dst.const_cast_derived(), src.derived(), internal::assign_op<typename DstXprType::Scalar>());
+ call_assignment(dst.const_cast_derived(), src.derived(), internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>());
return dst.const_cast_derived();
}
@@ -29,7 +29,7 @@ namespace Eigen {
EIGEN_STRONG_INLINE
const DstXprType& copy_using_evaluator(const NoAlias<DstXprType, StorageBase>& dst, const SrcXprType &src)
{
- call_assignment(dst, src.derived(), internal::assign_op<typename DstXprType::Scalar>());
+ call_assignment(dst, src.derived(), internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>());
return dst.expression();
}
@@ -45,7 +45,7 @@ namespace Eigen {
dst.const_cast_derived().resizeLike(src.derived());
#endif
- call_assignment(dst.const_cast_derived(), src.derived(), internal::assign_op<typename DstXprType::Scalar>());
+ call_assignment(dst.const_cast_derived(), src.derived(), internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>());
return dst.const_cast_derived();
}
@@ -53,28 +53,28 @@ namespace Eigen {
void add_assign_using_evaluator(const DstXprType& dst, const SrcXprType& src)
{
typedef typename DstXprType::Scalar Scalar;
- call_assignment(const_cast<DstXprType&>(dst), src.derived(), internal::add_assign_op<Scalar>());
+ call_assignment(const_cast<DstXprType&>(dst), src.derived(), internal::add_assign_op<Scalar,typename SrcXprType::Scalar>());
}
template<typename DstXprType, typename SrcXprType>
void subtract_assign_using_evaluator(const DstXprType& dst, const SrcXprType& src)
{
typedef typename DstXprType::Scalar Scalar;
- call_assignment(const_cast<DstXprType&>(dst), src.derived(), internal::sub_assign_op<Scalar>());
+ call_assignment(const_cast<DstXprType&>(dst), src.derived(), internal::sub_assign_op<Scalar,typename SrcXprType::Scalar>());
}
template<typename DstXprType, typename SrcXprType>
void multiply_assign_using_evaluator(const DstXprType& dst, const SrcXprType& src)
{
typedef typename DstXprType::Scalar Scalar;
- call_assignment(dst.const_cast_derived(), src.derived(), internal::mul_assign_op<Scalar>());
+ call_assignment(dst.const_cast_derived(), src.derived(), internal::mul_assign_op<Scalar,typename SrcXprType::Scalar>());
}
template<typename DstXprType, typename SrcXprType>
void divide_assign_using_evaluator(const DstXprType& dst, const SrcXprType& src)
{
typedef typename DstXprType::Scalar Scalar;
- call_assignment(dst.const_cast_derived(), src.derived(), internal::div_assign_op<Scalar>());
+ call_assignment(dst.const_cast_derived(), src.derived(), internal::div_assign_op<Scalar,typename SrcXprType::Scalar>());
}
template<typename DstXprType, typename SrcXprType>
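In evaluators.cpp each helper now instantiates the assignment functors with both the destination and the source scalar type. A hedged sketch of the internal entry point these helpers wrap (internal API, not part of Eigen's public interface; assumes a post-merge Eigen):

    #include <Eigen/Dense>
    #include <complex>

    int main()
    {
      Eigen::MatrixXd  md = Eigen::MatrixXd::Random(3,3);
      Eigen::MatrixXcd mcd(3,3);
      // assign_op is now assign_op<DstScalar,SrcScalar>, which is what allows a real
      // source to be assigned into a complex destination without an explicit cast.
      Eigen::internal::call_assignment(mcd, md,
          Eigen::internal::assign_op<std::complex<double>, double>());
      return 0;
    }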
diff --git a/test/geo_alignedbox.cpp b/test/geo_alignedbox.cpp
index 2bdb4b7f2..ba3378aab 100644
--- a/test/geo_alignedbox.cpp
+++ b/test/geo_alignedbox.cpp
@@ -48,6 +48,8 @@ template<typename BoxType> void alignedbox(const BoxType& _box)
b0.extend(p0);
b0.extend(p1);
VERIFY(b0.contains(p0*s1+(Scalar(1)-s1)*p1));
+ VERIFY(b0.contains(b0.center()));
+ VERIFY(b0.center()==(p0+p1)/Scalar(2));
(b2 = b0).extend(b1);
VERIFY(b2.contains(b0));
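The new geo_alignedbox assertions pin down center(): after extending a box with two points, the center is their midpoint and is contained in the box. A minimal standalone version of that check (illustrative):

    #include <Eigen/Geometry>

    int main()
    {
      Eigen::AlignedBox3d b;                 // empty box
      b.extend(Eigen::Vector3d(0,0,0));
      b.extend(Eigen::Vector3d(2,4,6));
      // center() is the midpoint of the extended corners and lies inside the box.
      bool ok = b.contains(b.center()) && b.center().isApprox(Eigen::Vector3d(1,2,3));
      return ok ? 0 : 1;
    }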
diff --git a/test/linearstructure.cpp b/test/linearstructure.cpp
index e7f4b3dc5..17474af10 100644
--- a/test/linearstructure.cpp
+++ b/test/linearstructure.cpp
@@ -9,7 +9,7 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
static bool g_called;
-#define EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN { g_called = true; }
+#define EIGEN_SCALAR_BINARY_OP_PLUGIN { g_called |= (!internal::is_same<LhsScalar,RhsScalar>::value); }
#include "main.h"
@@ -93,6 +93,22 @@ template<typename MatrixType> void real_complex(DenseIndex rows = MatrixType::Ro
g_called = false;
VERIFY_IS_APPROX(m1/s, m1/Scalar(s));
VERIFY(g_called && "matrix<complex> / real not properly optimized");
+
+ g_called = false;
+ VERIFY_IS_APPROX(s+m1.array(), Scalar(s)+m1.array());
+ VERIFY(g_called && "real + matrix<complex> not properly optimized");
+
+ g_called = false;
+ VERIFY_IS_APPROX(m1.array()+s, m1.array()+Scalar(s));
+ VERIFY(g_called && "matrix<complex> + real not properly optimized");
+
+ g_called = false;
+ VERIFY_IS_APPROX(s-m1.array(), Scalar(s)-m1.array());
+ VERIFY(g_called && "real - matrix<complex> not properly optimized");
+
+ g_called = false;
+ VERIFY_IS_APPROX(m1.array()-s, m1.array()-Scalar(s));
+ VERIFY(g_called && "matrix<complex> - real not properly optimized");
}
void test_linearstructure()
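linearstructure.cpp now hooks EIGEN_SCALAR_BINARY_OP_PLUGIN, which is pasted into the constructor of every scalar binary functor where its LhsScalar/RhsScalar typedefs are in scope; g_called therefore flips exactly when a genuinely mixed-scalar functor is built, proving the real operand was not converted to a complex scalar up front. A minimal sketch of the same pattern (illustrative; the macro must be defined before any Eigen header is included):

    static bool g_mixed_used;
    // Expanded inside Eigen's binary functors, where LhsScalar and RhsScalar are defined.
    #define EIGEN_SCALAR_BINARY_OP_PLUGIN \
      { g_mixed_used |= (!Eigen::internal::is_same<LhsScalar,RhsScalar>::value); }
    #include <Eigen/Dense>

    int main()
    {
      Eigen::ArrayXcd a = Eigen::ArrayXcd::Random(5);
      g_mixed_used = false;
      Eigen::ArrayXcd b = a + 2.0;         // complex array + real scalar
      return g_mixed_used ? 0 : 1;         // mixed-scalar sum functor was instantiated
    }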
diff --git a/test/mixingtypes.cpp b/test/mixingtypes.cpp
index dbcf468ea..57ef85c32 100644
--- a/test/mixingtypes.cpp
+++ b/test/mixingtypes.cpp
@@ -23,10 +23,18 @@
#endif
+static bool g_called;
+#define EIGEN_SCALAR_BINARY_OP_PLUGIN { g_called |= (!internal::is_same<LhsScalar,RhsScalar>::value); }
+
#include "main.h"
using namespace std;
+#define VERIFY_MIX_SCALAR(XPR,REF) \
+ g_called = false; \
+ VERIFY_IS_APPROX(XPR,REF); \
+ VERIFY( g_called && #XPR" not properly optimized");
+
template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType)
{
typedef std::complex<float> CF;
@@ -42,6 +50,7 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType)
Mat_f mf = Mat_f::Random(size,size);
Mat_d md = mf.template cast<double>();
+ //Mat_d rd = md;
Mat_cf mcf = Mat_cf::Random(size,size);
Mat_cd mcd = mcf.template cast<complex<double> >();
Mat_cd rcd = mcd;
@@ -56,23 +65,50 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType)
mf+mf;
- VERIFY_RAISES_ASSERT(mf+md);
-#if !EIGEN_HAS_STD_RESULT_OF
- // this one does not even compile with C++11
- VERIFY_RAISES_ASSERT(mf+mcf);
-#endif
+
+// VERIFY_RAISES_ASSERT(mf+md); // does not even compile
#ifdef EIGEN_DONT_VECTORIZE
VERIFY_RAISES_ASSERT(vf=vd);
VERIFY_RAISES_ASSERT(vf+=vd);
- VERIFY_RAISES_ASSERT(mcd=md);
#endif
// check scalar products
- VERIFY_IS_APPROX(vcf * sf , vcf * complex<float>(sf));
- VERIFY_IS_APPROX(sd * vcd, complex<double>(sd) * vcd);
- VERIFY_IS_APPROX(vf * scf , vf.template cast<complex<float> >() * scf);
- VERIFY_IS_APPROX(scd * vd, scd * vd.template cast<complex<double> >());
+ VERIFY_MIX_SCALAR(vcf * sf , vcf * complex<float>(sf));
+ VERIFY_MIX_SCALAR(sd * vcd , complex<double>(sd) * vcd);
+ VERIFY_MIX_SCALAR(vf * scf , vf.template cast<complex<float> >() * scf);
+ VERIFY_MIX_SCALAR(scd * vd , scd * vd.template cast<complex<double> >());
+
+ VERIFY_MIX_SCALAR(vcf * 2 , vcf * complex<float>(2));
+ VERIFY_MIX_SCALAR(vcf * 2.1 , vcf * complex<float>(2.1));
+ VERIFY_MIX_SCALAR(2 * vcf, vcf * complex<float>(2));
+ VERIFY_MIX_SCALAR(2.1 * vcf , vcf * complex<float>(2.1));
+
+ // check scalar quotients
+ VERIFY_MIX_SCALAR(vcf / sf , vcf / complex<float>(sf));
+ VERIFY_MIX_SCALAR(vf / scf , vf.template cast<complex<float> >() / scf);
+ VERIFY_MIX_SCALAR(vf.array() / scf, vf.template cast<complex<float> >().array() / scf);
+ VERIFY_MIX_SCALAR(scd / vd.array() , scd / vd.template cast<complex<double> >().array());
+
+ // check scalar increment
+ VERIFY_MIX_SCALAR(vcf.array() + sf , vcf.array() + complex<float>(sf));
+ VERIFY_MIX_SCALAR(sd + vcd.array(), complex<double>(sd) + vcd.array());
+ VERIFY_MIX_SCALAR(vf.array() + scf, vf.template cast<complex<float> >().array() + scf);
+ VERIFY_MIX_SCALAR(scd + vd.array() , scd + vd.template cast<complex<double> >().array());
+
+ // check scalar subtractions
+ VERIFY_MIX_SCALAR(vcf.array() - sf , vcf.array() - complex<float>(sf));
+ VERIFY_MIX_SCALAR(sd - vcd.array(), complex<double>(sd) - vcd.array());
+ VERIFY_MIX_SCALAR(vf.array() - scf, vf.template cast<complex<float> >().array() - scf);
+ VERIFY_MIX_SCALAR(scd - vd.array() , scd - vd.template cast<complex<double> >().array());
+
+ // check scalar powers
+ VERIFY_MIX_SCALAR( pow(vcf.array(), sf), pow(vcf.array(), complex<float>(sf)) );
+ VERIFY_MIX_SCALAR( vcf.array().pow(sf) , pow(vcf.array(), complex<float>(sf)) );
+ VERIFY_MIX_SCALAR( pow(sd, vcd.array()), pow(complex<double>(sd), vcd.array()) );
+ VERIFY_MIX_SCALAR( pow(vf.array(), scf), pow(vf.template cast<complex<float> >().array(), scf) );
+ VERIFY_MIX_SCALAR( vf.array().pow(scf) , pow(vf.template cast<complex<float> >().array(), scf) );
+ VERIFY_MIX_SCALAR( pow(scd, vd.array()), pow(scd, vd.template cast<complex<double> >().array()) );
// check dot product
vf.dot(vf);
@@ -186,16 +222,50 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType)
Mat_cd((scd * md.template cast<CD>().eval() * mcd).template triangularView<Upper>()));
- VERIFY_IS_APPROX( md.array() * mcd.array(), md.template cast<CD>().eval().array() * mcd.array() );
- VERIFY_IS_APPROX( mcd.array() * md.array(), mcd.array() * md.template cast<CD>().eval().array() );
-// VERIFY_IS_APPROX( md.array() / mcd.array(), md.template cast<CD>().eval().array() / mcd.array() );
+ VERIFY_IS_APPROX( md.array() * mcd.array(), md.template cast<CD>().eval().array() * mcd.array() );
+ VERIFY_IS_APPROX( mcd.array() * md.array(), mcd.array() * md.template cast<CD>().eval().array() );
+
+ VERIFY_IS_APPROX( md.array() + mcd.array(), md.template cast<CD>().eval().array() + mcd.array() );
+ VERIFY_IS_APPROX( mcd.array() + md.array(), mcd.array() + md.template cast<CD>().eval().array() );
+
+ VERIFY_IS_APPROX( md.array() - mcd.array(), md.template cast<CD>().eval().array() - mcd.array() );
+ VERIFY_IS_APPROX( mcd.array() - md.array(), mcd.array() - md.template cast<CD>().eval().array() );
+
+ VERIFY_IS_APPROX( md.array() / mcd.array(), md.template cast<CD>().eval().array() / mcd.array() );
VERIFY_IS_APPROX( mcd.array() / md.array(), mcd.array() / md.template cast<CD>().eval().array() );
+ VERIFY_IS_APPROX( md.array().pow(mcd.array()), md.template cast<CD>().eval().array().pow(mcd.array()) );
+ VERIFY_IS_APPROX( mcd.array().pow(md.array()), mcd.array().pow(md.template cast<CD>().eval().array()) );
+
+ VERIFY_IS_APPROX( pow(md.array(),mcd.array()), md.template cast<CD>().eval().array().pow(mcd.array()) );
+ VERIFY_IS_APPROX( pow(mcd.array(),md.array()), mcd.array().pow(md.template cast<CD>().eval().array()) );
+
+ rcd = mcd;
+ VERIFY_IS_APPROX( rcd = md, md.template cast<CD>().eval() );
+ rcd = mcd;
+ VERIFY_IS_APPROX( rcd += md, mcd + md.template cast<CD>().eval() );
+ rcd = mcd;
+ VERIFY_IS_APPROX( rcd -= md, mcd - md.template cast<CD>().eval() );
rcd = mcd;
VERIFY_IS_APPROX( rcd.array() *= md.array(), mcd.array() * md.template cast<CD>().eval().array() );
rcd = mcd;
VERIFY_IS_APPROX( rcd.array() /= md.array(), mcd.array() / md.template cast<CD>().eval().array() );
+
+ rcd = mcd;
+ VERIFY_IS_APPROX( rcd.noalias() += md + mcd*md, mcd + (md.template cast<CD>().eval()) + mcd*(md.template cast<CD>().eval()));
+
+ VERIFY_IS_APPROX( rcd.noalias() = md*md, ((md*md).eval().template cast<CD>()) );
+ rcd = mcd;
+ VERIFY_IS_APPROX( rcd.noalias() += md*md, mcd + ((md*md).eval().template cast<CD>()) );
+ rcd = mcd;
+ VERIFY_IS_APPROX( rcd.noalias() -= md*md, mcd - ((md*md).eval().template cast<CD>()) );
+
+ VERIFY_IS_APPROX( rcd.noalias() = mcd + md*md, mcd + ((md*md).eval().template cast<CD>()) );
+ rcd = mcd;
+ VERIFY_IS_APPROX( rcd.noalias() += mcd + md*md, mcd + mcd + ((md*md).eval().template cast<CD>()) );
+ rcd = mcd;
+ VERIFY_IS_APPROX( rcd.noalias() -= mcd + md*md, - ((md*md).eval().template cast<CD>()) );
}
void test_mixingtypes()
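mixingtypes.cpp grows from a compile-failure test into a functional one: mixed scalar products, quotients, sums, differences and powers, mixed array operations, and mixed products accumulated through noalias() into a complex destination are all checked against the explicitly cast reference. A short sketch of a few of the newly covered forms (illustrative, assumes a post-merge Eigen):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd  md  = Eigen::MatrixXd::Random(3,3);
      Eigen::MatrixXcd mcd = Eigen::MatrixXcd::Random(3,3);
      Eigen::MatrixXcd rcd = mcd;

      // Forms covered by the extended test, written without explicit casts:
      rcd.noalias() += md * md;                          // real matrix product into a complex matrix
      Eigen::ArrayXXcd prod = mcd.array() * md.array();  // mixed coefficient-wise product
      Eigen::ArrayXXcd quot = mcd.array() / md.array();  // mixed coefficient-wise quotient
      Eigen::ArrayXXcd powr = Eigen::pow(md.array(), mcd.array());  // real base, complex exponent
      return 0;
    }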
diff --git a/test/nesting_ops.cpp b/test/nesting_ops.cpp
index 2f5025305..a419b0e44 100644
--- a/test/nesting_ops.cpp
+++ b/test/nesting_ops.cpp
@@ -75,8 +75,8 @@ template <typename MatrixType> void run_nesting_ops_2(const MatrixType& _m)
}
else
{
- VERIFY( verify_eval_type<1>(2*m1, 2*m1) );
- VERIFY( verify_eval_type<2>(2*m1, m1) );
+ VERIFY( verify_eval_type<2>(2*m1, 2*m1) );
+ VERIFY( verify_eval_type<3>(2*m1, m1) );
}
VERIFY( verify_eval_type<2>(m1+m1, m1+m1) );
VERIFY( verify_eval_type<3>(m1+m1, m1) );
diff --git a/test/vectorization_logic.cpp b/test/vectorization_logic.cpp
index 24a7641ff..b7c2df64b 100644
--- a/test/vectorization_logic.cpp
+++ b/test/vectorization_logic.cpp
@@ -29,7 +29,7 @@ using internal::demangle_unrolling;
template<typename Dst, typename Src>
bool test_assign(const Dst&, const Src&, int traversal, int unrolling)
{
- typedef internal::copy_using_evaluator_traits<internal::evaluator<Dst>,internal::evaluator<Src>, internal::assign_op<typename Dst::Scalar> > traits;
+ typedef internal::copy_using_evaluator_traits<internal::evaluator<Dst>,internal::evaluator<Src>, internal::assign_op<typename Dst::Scalar,typename Src::Scalar> > traits;
bool res = traits::Traversal==traversal;
if(unrolling==InnerUnrolling+CompleteUnrolling)
res = res && (int(traits::Unrolling)==InnerUnrolling || int(traits::Unrolling)==CompleteUnrolling);
@@ -53,7 +53,7 @@ bool test_assign(const Dst&, const Src&, int traversal, int unrolling)
template<typename Dst, typename Src>
bool test_assign(int traversal, int unrolling)
{
- typedef internal::copy_using_evaluator_traits<internal::evaluator<Dst>,internal::evaluator<Src>, internal::assign_op<typename Dst::Scalar> > traits;
+ typedef internal::copy_using_evaluator_traits<internal::evaluator<Dst>,internal::evaluator<Src>, internal::assign_op<typename Dst::Scalar,typename Src::Scalar> > traits;
bool res = traits::Traversal==traversal && traits::Unrolling==unrolling;
if(!res)
{
@@ -73,7 +73,8 @@ bool test_assign(int traversal, int unrolling)
template<typename Xpr>
bool test_redux(const Xpr&, int traversal, int unrolling)
{
- typedef internal::redux_traits<internal::scalar_sum_op<typename Xpr::Scalar>,internal::redux_evaluator<Xpr> > traits;
+ typedef typename Xpr::Scalar Scalar;
+ typedef internal::redux_traits<internal::scalar_sum_op<Scalar,Scalar>,internal::redux_evaluator<Xpr> > traits;
bool res = traits::Traversal==traversal && traits::Unrolling==unrolling;
if(!res)