From 36e6c9064fc68d5c47473f6d251da10e96ad42b3 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Fri, 18 Jul 2014 14:19:18 +0200 Subject: bug #770: fix out of bounds access --- unsupported/Eigen/MPRealSupport | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/MPRealSupport b/unsupported/Eigen/MPRealSupport index 35d77e5bd..0584e470e 100644 --- a/unsupported/Eigen/MPRealSupport +++ b/unsupported/Eigen/MPRealSupport @@ -157,9 +157,12 @@ int main() void operator()(mpreal* res, Index resStride, const mpreal* blockA, const mpreal* blockB, Index rows, Index depth, Index cols, mpreal alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0) { + if(rows==0 || cols==0 || depth==0) + return; + mpreal acc1(0,mpfr_get_prec(blockA[0].mpfr_srcptr())), tmp (0,mpfr_get_prec(blockA[0].mpfr_srcptr())); - + if(strideA==-1) strideA = depth; if(strideB==-1) strideB = depth; -- cgit v1.2.3 From 280661e67d053b4e7f3358b160bcc11f76704be9 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Tue, 2 Sep 2014 17:29:06 +0200 Subject: Remove LM::sqrt_() member function in favor of a shortcut for sqrt(epsilon()) --- .../src/NonLinearOptimization/LevenbergMarquardt.h | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h index ecb8dccf4..69106ddc5 100644 --- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h +++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h @@ -45,18 +45,24 @@ namespace LevenbergMarquardtSpace { template class LevenbergMarquardt { + static Scalar sqrt_epsilon() + { + using std::sqrt; + return sqrt(NumTraits::epsilon()); + } + public: LevenbergMarquardt(FunctorType &_functor) : functor(_functor) { nfev = njev = iter = 0; fnorm = gnorm = 0.; useExternalScaling=false; } typedef DenseIndex Index; - + struct Parameters { Parameters() : factor(Scalar(100.)) , maxfev(400) - , ftol(sqrt_(NumTraits::epsilon())) - , xtol(sqrt_(NumTraits::epsilon())) + , ftol(sqrt_epsilon()) + , xtol(sqrt_epsilon()) , gtol(Scalar(0.)) , epsfcn(Scalar(0.)) {} Scalar factor; @@ -72,7 +78,7 @@ public: LevenbergMarquardtSpace::Status lmder1( FVectorType &x, - const Scalar tol = sqrt_(NumTraits::epsilon()) + const Scalar tol = sqrt_epsilon() ); LevenbergMarquardtSpace::Status minimize(FVectorType &x); @@ -83,12 +89,12 @@ public: FunctorType &functor, FVectorType &x, Index *nfev, - const Scalar tol = sqrt_(NumTraits::epsilon()) + const Scalar tol = sqrt_epsilon() ); LevenbergMarquardtSpace::Status lmstr1( FVectorType &x, - const Scalar tol = sqrt_(NumTraits::epsilon()) + const Scalar tol = sqrt_epsilon() ); LevenbergMarquardtSpace::Status minimizeOptimumStorage(FVectorType &x); @@ -109,7 +115,6 @@ public: Scalar lm_param(void) { return par; } private: - static Scalar sqrt_(const Scalar& x) { using std::sqrt; return sqrt(x); } FunctorType &functor; Index n; -- cgit v1.2.3 From abb33258ce52a8cc3b540fb7cafb1812d9b71dd9 Mon Sep 17 00:00:00 2001 From: Jitse Niesen Date: Sat, 6 Sep 2014 14:59:44 +0100 Subject: Doc: difference between array and matrix cosine etc (bug #830) --- Eigen/src/plugins/ArrayCwiseUnaryOps.h | 18 ++++++++++++++++++ unsupported/Eigen/MatrixFunctions | 21 +++++++++++++++++---- 2 files changed, 35 insertions(+), 4 deletions(-) (limited to 'unsupported/Eigen') diff --git 
a/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/Eigen/src/plugins/ArrayCwiseUnaryOps.h index ce462e951..f6d7d8944 100644 --- a/Eigen/src/plugins/ArrayCwiseUnaryOps.h +++ b/Eigen/src/plugins/ArrayCwiseUnaryOps.h @@ -29,6 +29,9 @@ abs2() const } /** \returns an expression of the coefficient-wise exponential of *this. + * + * This function computes the coefficient-wise exponential. The function MatrixBase::exp() in the + * unsupported module MatrixFunctions computes the matrix exponential. * * Example: \include Cwise_exp.cpp * Output: \verbinclude Cwise_exp.out @@ -43,6 +46,9 @@ exp() const } /** \returns an expression of the coefficient-wise logarithm of *this. + * + * This function computes the coefficient-wise logarithm. The function MatrixBase::log() in the + * unsupported module MatrixFunctions computes the matrix logarithm. * * Example: \include Cwise_log.cpp * Output: \verbinclude Cwise_log.out @@ -57,6 +63,9 @@ log() const } /** \returns an expression of the coefficient-wise square root of *this. + * + * This function computes the coefficient-wise square root. The function MatrixBase::sqrt() in the + * unsupported module MatrixFunctions computes the matrix square root. * * Example: \include Cwise_sqrt.cpp * Output: \verbinclude Cwise_sqrt.out @@ -71,6 +80,9 @@ sqrt() const } /** \returns an expression of the coefficient-wise cosine of *this. + * + * This function computes the coefficient-wise cosine. The function MatrixBase::cos() in the + * unsupported module MatrixFunctions computes the matrix cosine. * * Example: \include Cwise_cos.cpp * Output: \verbinclude Cwise_cos.out @@ -86,6 +98,9 @@ cos() const /** \returns an expression of the coefficient-wise sine of *this. + * + * This function computes the coefficient-wise sine. The function MatrixBase::sin() in the + * unsupported module MatrixFunctions computes the matrix sine. * * Example: \include Cwise_sin.cpp * Output: \verbinclude Cwise_sin.out @@ -155,6 +170,9 @@ atan() const } /** \returns an expression of the coefficient-wise power of *this to the given exponent. + * + * This function computes the coefficient-wise power. The function MatrixBase::pow() in the + * unsupported module MatrixFunctions computes the matrix power. * * Example: \include Cwise_pow.cpp * Output: \verbinclude Cwise_pow.out diff --git a/unsupported/Eigen/MatrixFunctions b/unsupported/Eigen/MatrixFunctions index 0b12aaffb..0320606c1 100644 --- a/unsupported/Eigen/MatrixFunctions +++ b/unsupported/Eigen/MatrixFunctions @@ -82,7 +82,9 @@ const MatrixFunctionReturnValue MatrixBase::cos() const \param[in] M a square matrix. \returns expression representing \f$ \cos(M) \f$. -This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos(). +This function computes the matrix cosine. Use ArrayBase::cos() for computing the entry-wise cosine. + +The implementation calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos(). \sa \ref matrixbase_sin "sin()" for an example. @@ -123,6 +125,9 @@ differential equations: the solution of \f$ y' = My \f$ with the initial condition \f$ y(0) = y_0 \f$ is given by \f$ y(t) = \exp(M) y_0 \f$. +The matrix exponential is different from applying the exp function to all the entries in the matrix. +Use ArrayBase::exp() if you want to do the latter. + The cost of the computation is approximately \f$ 20 n^3 \f$ for matrices of size \f$ n \f$. The number 20 depends weakly on the norm of the matrix. 
@@ -177,6 +182,9 @@ the scalar logarithm, the equation \f$ \exp(X) = M \f$ may have multiple solutions; this function returns a matrix whose eigenvalues have imaginary part in the interval \f$ (-\pi,\pi] \f$. +The matrix logarithm is different from applying the log function to all the entries in the matrix. +Use ArrayBase::log() if you want to do the latter. + In the real case, the matrix \f$ M \f$ should be invertible and it should have no eigenvalues which are real and negative (pairs of complex conjugate eigenvalues are allowed). In the complex case, it @@ -232,7 +240,8 @@ const MatrixPowerReturnValue MatrixBase::pow(RealScalar p) con The matrix power \f$ M^p \f$ is defined as \f$ \exp(p \log(M)) \f$, where exp denotes the matrix exponential, and log denotes the matrix -logarithm. +logarithm. This is different from raising all the entries in the matrix +to the p-th power. Use ArrayBase::pow() if you want to do the latter. If \p p is complex, the scalar type of \p M should be the type of \p p . \f$ M^p \f$ simply evaluates into \f$ \exp(p \log(M)) \f$. @@ -391,7 +400,9 @@ const MatrixFunctionReturnValue MatrixBase::sin() const \param[in] M a square matrix. \returns expression representing \f$ \sin(M) \f$. -This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin(). +This function computes the matrix sine. Use ArrayBase::sin() for computing the entry-wise sine. + +The implementation calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin(). Example: \include MatrixSine.cpp Output: \verbinclude MatrixSine.out @@ -428,7 +439,9 @@ const MatrixSquareRootReturnValue MatrixBase::sqrt() const The matrix square root of \f$ M \f$ is the matrix \f$ M^{1/2} \f$ whose square is the original matrix; so if \f$ S = M^{1/2} \f$ then -\f$ S^2 = M \f$. +\f$ S^2 = M \f$. This is different from taking the square root of all +the entries in the matrix; use ArrayBase::sqrt() if you want to do the +latter. 
In the real case, the matrix \f$ M \f$ should be invertible and it should have no eigenvalues which are real and negative (pairs of -- cgit v1.2.3 From b3d63b4db21e746ef3ef260caa28773c8d3ae77b Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Mon, 1 Sep 2014 17:21:05 +0200 Subject: Add evaluator for DynamicSparseMatrix --- .../Eigen/src/SparseExtra/DynamicSparseMatrix.h | 32 ++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h index dec16df28..4210df68a 100644 --- a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h +++ b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h @@ -352,6 +352,38 @@ class DynamicSparseMatrix::ReverseInnerIterator : public const Index m_outer; }; +#ifdef EIGEN_ENABLE_EVALUATORS +namespace internal { + +template +struct evaluator > + : evaluator_base > +{ + typedef _Scalar Scalar; + typedef _Index Index; + typedef DynamicSparseMatrix<_Scalar,_Options,_Index> SparseMatrixType; + typedef typename SparseMatrixType::InnerIterator InnerIterator; + typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator; + + enum { + CoeffReadCost = NumTraits<_Scalar>::ReadCost, + Flags = SparseMatrixType::Flags + }; + + evaluator() : m_matrix(0) {} + evaluator(const SparseMatrixType &mat) : m_matrix(&mat) {} + + operator SparseMatrixType&() { return m_matrix->const_cast_derived(); } + operator const SparseMatrixType&() const { return *m_matrix; } + + Scalar coeff(Index row, Index col) const { return m_matrix->coeff(row,col); } + + const SparseMatrixType *m_matrix; +}; + +} +#endif + } // end namespace Eigen #endif // EIGEN_DYNAMIC_SPARSEMATRIX_H -- cgit v1.2.3 From b051bbd64fcde21a352ff35d23adcd00afaf845d Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Mon, 1 Sep 2014 17:21:47 +0200 Subject: Make unsupport sparse solvers use SparseSolverBase --- unsupported/Eigen/src/IterativeSolvers/DGMRES.h | 13 +++++++++---- unsupported/Eigen/src/IterativeSolvers/GMRES.h | 15 +++++++++------ .../Eigen/src/IterativeSolvers/IncompleteCholesky.h | 15 ++++++++++++--- unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h | 19 +++++++++++++------ unsupported/Eigen/src/IterativeSolvers/MINRES.h | 19 ++++++++++++------- 5 files changed, 55 insertions(+), 26 deletions(-) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h index 9fcc8a8d9..fe0bfd948 100644 --- a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h @@ -108,6 +108,7 @@ class DGMRES : public IterativeSolverBase > using Base::m_isInitialized; using Base::m_tolerance; public: + using Base::_solve_impl; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; @@ -138,6 +139,7 @@ class DGMRES : public IterativeSolverBase > ~DGMRES() {} +#ifndef EIGEN_TEST_EVALUATORS /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A * \a x0 as an initial solution. 
* @@ -153,10 +155,11 @@ class DGMRES : public IterativeSolverBase > return internal::solve_retval_with_guess (*this, b.derived(), x0); } +#endif /** \internal */ template - void _solveWithGuess(const Rhs& b, Dest& x) const + void _solve_with_guess_impl(const Rhs& b, Dest& x) const { bool failed = false; for(int j=0; j > /** \internal */ template - void _solve(const Rhs& b, Dest& x) const + void _solve_impl(const Rhs& b, MatrixBase& x) const { x = b; - _solveWithGuess(b,x); + _solve_with_guess_impl(b,x.derived()); } /** * Get the restart value @@ -522,6 +525,7 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x, return 0; } +#ifndef EIGEN_TEST_EVALUATORS namespace internal { template @@ -533,10 +537,11 @@ struct solve_retval, Rhs> template void evalTo(Dest& dst) const { - dec()._solve(rhs(),dst); + dec()._solve_impl(rhs(),dst); } }; } // end namespace internal +#endif } // end namespace Eigen #endif diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h index 67498705b..fd76a9d2c 100644 --- a/unsupported/Eigen/src/IterativeSolvers/GMRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h @@ -281,6 +281,7 @@ private: int m_restart; public: + using Base::_solve_impl; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; @@ -315,6 +316,7 @@ public: */ void set_restart(const int restart) { m_restart=restart; } +#ifndef EIGEN_TEST_EVALUATORS /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A * \a x0 as an initial solution. * @@ -330,10 +332,11 @@ public: return internal::solve_retval_with_guess (*this, b.derived(), x0); } +#endif /** \internal */ template - void _solveWithGuess(const Rhs& b, Dest& x) const + void _solve_with_guess_impl(const Rhs& b, Dest& x) const { bool failed = false; for(int j=0; j - void _solve(const Rhs& b, Dest& x) const + void _solve_impl(const Rhs& b, MatrixBase &x) const { x = b; if(x.squaredNorm() == 0) return; // Check Zero right hand side - _solveWithGuess(b,x); + _solve_with_guess_impl(b,x.derived()); } protected: }; - +#ifndef EIGEN_TEST_EVALUATORS namespace internal { template @@ -376,12 +379,12 @@ struct solve_retval, Rhs> template void evalTo(Dest& dst) const { - dec()._solve(rhs(),dst); + dec()._solve_impl(rhs(),dst); } }; } // end namespace internal - +#endif } // end namespace Eigen #endif // EIGEN_GMRES_H diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h index 661c1f2e0..1ee1c89b2 100644 --- a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h +++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h @@ -27,8 +27,11 @@ namespace Eigen { */ template > -class IncompleteCholesky : internal::noncopyable +class IncompleteCholesky : public SparseSolverBase > { + protected: + typedef SparseSolverBase > Base; + using Base::m_isInitialized; public: typedef SparseMatrix MatrixType; typedef _OrderingType OrderingType; @@ -89,7 +92,7 @@ class IncompleteCholesky : internal::noncopyable } template - void _solve(const Rhs& b, Dest& x) const + void _solve_impl(const Rhs& b, Dest& x) const { eigen_assert(m_factorizationIsOk && "factorize() should be called first"); if (m_perm.rows() == b.rows()) @@ -103,6 +106,8 @@ class IncompleteCholesky : internal::noncopyable x = m_perm * x; x = m_scal.asDiagonal() * x; } + +#ifndef EIGEN_TEST_EVALUATORS template inline const internal::solve_retval 
solve(const MatrixBase& b) const { @@ -112,13 +117,14 @@ class IncompleteCholesky : internal::noncopyable && "IncompleteLLT::solve(): invalid number of rows of the right hand side matrix b"); return internal::solve_retval(*this, b.derived()); } +#endif + protected: SparseMatrix m_L; // The lower part stored in CSC ScalarType m_scal; // The vector for scaling the matrix Scalar m_shift; //The initial shift parameter bool m_analysisIsOk; bool m_factorizationIsOk; - bool m_isInitialized; ComputationInfo m_info; PermutationType m_perm; @@ -256,6 +262,8 @@ inline void IncompleteCholesky::updateList(const Idx listCol[rowIdx(jk)].push_back(col); } } + +#ifndef EIGEN_TEST_EVALUATORS namespace internal { template @@ -272,6 +280,7 @@ struct solve_retval, Rhs> }; } // end namespace internal +#endif } // end namespace Eigen diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h index 67e780181..e86f65644 100644 --- a/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h +++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h @@ -13,8 +13,12 @@ namespace Eigen { template -class IncompleteLU +class IncompleteLU : public SparseSolverBase > { + protected: + typedef SparseSolverBase > Base; + using Base::m_isInitialized; + typedef _Scalar Scalar; typedef Matrix Vector; typedef typename Vector::Index Index; @@ -23,10 +27,10 @@ class IncompleteLU public: typedef Matrix MatrixType; - IncompleteLU() : m_isInitialized(false) {} + IncompleteLU() {} template - IncompleteLU(const MatrixType& mat) : m_isInitialized(false) + IncompleteLU(const MatrixType& mat) { compute(mat); } @@ -71,12 +75,13 @@ class IncompleteLU } template - void _solve(const Rhs& b, Dest& x) const + void _solve_impl(const Rhs& b, Dest& x) const { x = m_lu.template triangularView().solve(b); x = m_lu.template triangularView().solve(x); } +#ifndef EIGEN_TEST_EVALUATORS template inline const internal::solve_retval solve(const MatrixBase& b) const { @@ -85,12 +90,13 @@ class IncompleteLU && "IncompleteLU::solve(): invalid number of rows of the right hand side matrix b"); return internal::solve_retval(*this, b.derived()); } +#endif protected: FactorType m_lu; - bool m_isInitialized; }; +#ifndef EIGEN_TEST_EVALUATORS namespace internal { template @@ -102,11 +108,12 @@ struct solve_retval, Rhs> template void evalTo(Dest& dst) const { - dec()._solve(rhs(),dst); + dec()._solve_impl(rhs(),dst); } }; } // end namespace internal +#endif } // end namespace Eigen diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h index 98f9ecc17..28d5c692d 100644 --- a/unsupported/Eigen/src/IterativeSolvers/MINRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h @@ -2,7 +2,7 @@ // for linear algebra. // // Copyright (C) 2012 Giacomo Po -// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2011-2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed @@ -217,6 +217,7 @@ namespace Eigen { using Base::m_info; using Base::m_isInitialized; public: + using Base::_solve_impl; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; @@ -244,7 +245,8 @@ namespace Eigen { /** Destructor. */ ~MINRES(){} - + +#ifndef EIGEN_TEST_EVALUATORS /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A * \a x0 as an initial solution. 
* @@ -260,10 +262,11 @@ namespace Eigen { return internal::solve_retval_with_guess (*this, b.derived(), x0); } +#endif /** \internal */ template - void _solveWithGuess(const Rhs& b, Dest& x) const + void _solve_with_guess_impl(const Rhs& b, Dest& x) const { m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; @@ -284,16 +287,17 @@ namespace Eigen { /** \internal */ template - void _solve(const Rhs& b, Dest& x) const + void _solve_impl(const Rhs& b, MatrixBase &x) const { x.setZero(); - _solveWithGuess(b,x); + _solve_with_guess_impl(b,x.derived()); } protected: }; +#ifndef EIGEN_TEST_EVALUATORS namespace internal { template @@ -305,12 +309,13 @@ namespace Eigen { template void evalTo(Dest& dst) const { - dec()._solve(rhs(),dst); + dec()._solve_impl(rhs(),dst); } }; } // end namespace internal - +#endif + } // end namespace Eigen #endif // EIGEN_MINRES_H -- cgit v1.2.3 From daad9585a3635223b273cb45ba2a965d91060ff8 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Mon, 1 Sep 2014 17:24:07 +0200 Subject: Fix Kronecker product in legacy mode. --- unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h index b8f2cba17..ca66d4d89 100644 --- a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h +++ b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h @@ -215,7 +215,7 @@ struct traits > typedef typename remove_all<_Lhs>::type Lhs; typedef typename remove_all<_Rhs>::type Rhs; typedef typename scalar_product_traits::ReturnType Scalar; - typedef typename promote_storage_type::StorageKind, typename traits::StorageKind>::ret StorageKind; + typedef typename cwise_promote_storage_type::StorageKind, typename traits::StorageKind, scalar_product_op >::ret StorageKind; typedef typename promote_index_type::type Index; enum { -- cgit v1.2.3 From 8754341848b125b1f720b9b210875aa1543666d8 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Mon, 1 Sep 2014 17:25:13 +0200 Subject: Fix remaining garbage during a merge. 
--- unsupported/Eigen/MPRealSupport | 4 ---- 1 file changed, 4 deletions(-) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/MPRealSupport b/unsupported/Eigen/MPRealSupport index 630e0aa77..8e42965a3 100644 --- a/unsupported/Eigen/MPRealSupport +++ b/unsupported/Eigen/MPRealSupport @@ -159,11 +159,7 @@ int main() { if(rows==0 || cols==0 || depth==0) return; -<<<<<<< local -======= - ->>>>>>> other mpreal acc1(0,mpfr_get_prec(blockA[0].mpfr_srcptr())), tmp (0,mpfr_get_prec(blockA[0].mpfr_srcptr())); -- cgit v1.2.3 From eb392960285c2645d45118d424786a73768ff50a Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Mon, 1 Sep 2014 18:16:20 +0200 Subject: Reafctoring in D&C SVD unsupported module: clean and merge the SVDBase class to Eigen/SVD, rm copy/pasted JacobiSVD.h file --- Eigen/SVD | 1 + Eigen/src/SVD/JacobiSVD.h | 182 +---- Eigen/src/SVD/SVDBase.h | 263 +++++++ Eigen/src/SVD/UpperBidiagonalization.h | 1 + unsupported/Eigen/BDCSVD | 26 + unsupported/Eigen/SVD | 35 - unsupported/Eigen/src/BDCSVD/BDCSVD.h | 949 ++++++++++++++++++++++++++ unsupported/Eigen/src/BDCSVD/CMakeLists.txt | 6 + unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt | 29 + unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt | 21 + unsupported/Eigen/src/CMakeLists.txt | 1 + unsupported/Eigen/src/SVD/BDCSVD.h | 937 ------------------------- unsupported/Eigen/src/SVD/CMakeLists.txt | 6 - unsupported/Eigen/src/SVD/JacobiSVD.h | 782 --------------------- unsupported/Eigen/src/SVD/SVDBase.h | 236 ------- unsupported/Eigen/src/SVD/TODOBdcsvd.txt | 29 - unsupported/Eigen/src/SVD/doneInBDCSVD.txt | 21 - unsupported/test/svd_common.h | 2 +- 18 files changed, 1329 insertions(+), 2198 deletions(-) create mode 100644 Eigen/src/SVD/SVDBase.h create mode 100644 unsupported/Eigen/BDCSVD delete mode 100644 unsupported/Eigen/SVD create mode 100644 unsupported/Eigen/src/BDCSVD/BDCSVD.h create mode 100644 unsupported/Eigen/src/BDCSVD/CMakeLists.txt create mode 100644 unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt create mode 100644 unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt delete mode 100644 unsupported/Eigen/src/SVD/BDCSVD.h delete mode 100644 unsupported/Eigen/src/SVD/CMakeLists.txt delete mode 100644 unsupported/Eigen/src/SVD/JacobiSVD.h delete mode 100644 unsupported/Eigen/src/SVD/SVDBase.h delete mode 100644 unsupported/Eigen/src/SVD/TODOBdcsvd.txt delete mode 100644 unsupported/Eigen/src/SVD/doneInBDCSVD.txt (limited to 'unsupported/Eigen') diff --git a/Eigen/SVD b/Eigen/SVD index 5eee46df5..c3d24286c 100644 --- a/Eigen/SVD +++ b/Eigen/SVD @@ -21,6 +21,7 @@ */ #include "src/misc/Solve.h" +#include "src/SVD/SVDBase.h" #include "src/SVD/JacobiSVD.h" #if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT) #include "src/SVD/JacobiSVD_MKL.h" diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h index 412daa746..6f3907f5d 100644 --- a/Eigen/src/SVD/JacobiSVD.h +++ b/Eigen/src/SVD/JacobiSVD.h @@ -2,6 +2,7 @@ // for linear algebra. // // Copyright (C) 2009-2010 Benoit Jacob +// Copyright (C) 2013-2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed @@ -442,6 +443,12 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, *j_left = rot1 * j_right->transpose(); } +template +struct traits > +{ + typedef _MatrixType MatrixType; +}; + } // end namespace internal /** \ingroup SVD_Module @@ -498,7 +505,9 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, * \sa MatrixBase::jacobiSvd() */ template class JacobiSVD + : public SVDBase > { + typedef SVDBase Base; public: typedef _MatrixType MatrixType; @@ -515,13 +524,10 @@ template class JacobiSVD MatrixOptions = MatrixType::Options }; - typedef Matrix - MatrixUType; - typedef Matrix - MatrixVType; - typedef typename internal::plain_diag_type::type SingularValuesType; + typedef typename Base::MatrixUType MatrixUType; + typedef typename Base::MatrixVType MatrixVType; + typedef typename Base::SingularValuesType SingularValuesType; + typedef typename internal::plain_row_type::type RowType; typedef typename internal::plain_col_type::type ColType; typedef Matrix class JacobiSVD * perform decompositions via JacobiSVD::compute(const MatrixType&). */ JacobiSVD() - : m_isInitialized(false), - m_isAllocated(false), - m_usePrescribedThreshold(false), - m_computationOptions(0), - m_rows(-1), m_cols(-1), m_diagSize(0) {} @@ -549,11 +550,6 @@ template class JacobiSVD * \sa JacobiSVD() */ JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0) - : m_isInitialized(false), - m_isAllocated(false), - m_usePrescribedThreshold(false), - m_computationOptions(0), - m_rows(-1), m_cols(-1) { allocate(rows, cols, computationOptions); } @@ -569,11 +565,6 @@ template class JacobiSVD * available with the (non-default) FullPivHouseholderQR preconditioner. */ JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0) - : m_isInitialized(false), - m_isAllocated(false), - m_usePrescribedThreshold(false), - m_computationOptions(0), - m_rows(-1), m_cols(-1) { compute(matrix, computationOptions); } @@ -601,54 +592,6 @@ template class JacobiSVD return compute(matrix, m_computationOptions); } - /** \returns the \a U matrix. - * - * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, - * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU. - * - * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed. - * - * This method asserts that you asked for \a U to be computed. - */ - const MatrixUType& matrixU() const - { - eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); - eigen_assert(computeU() && "This JacobiSVD decomposition didn't compute U. Did you ask for it?"); - return m_matrixU; - } - - /** \returns the \a V matrix. - * - * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, - * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for ComputeThinV. - * - * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed. - * - * This method asserts that you asked for \a V to be computed. - */ - const MatrixVType& matrixV() const - { - eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); - eigen_assert(computeV() && "This JacobiSVD decomposition didn't compute V. Did you ask for it?"); - return m_matrixV; - } - - /** \returns the vector of singular values. 
- * - * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the - * returned vector has size \a m. Singular values are always sorted in decreasing order. - */ - const SingularValuesType& singularValues() const - { - eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); - return m_singularValues; - } - - /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */ - inline bool computeU() const { return m_computeFullU || m_computeThinU; } - /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */ - inline bool computeV() const { return m_computeFullV || m_computeThinV; } - /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. * * \param b the right-hand-side of the equation to solve. @@ -666,94 +609,31 @@ template class JacobiSVD eigen_assert(computeU() && computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); return internal::solve_retval(*this, b.derived()); } - - /** \returns the number of singular values that are not exactly 0 */ - Index nonzeroSingularValues() const - { - eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); - return m_nonzeroSingularValues; - } - /** \returns the rank of the matrix of which \c *this is the SVD. - * - * \note This method has to determine which singular values should be considered nonzero. - * For that, it uses the threshold value that you can control by calling - * setThreshold(const RealScalar&). - */ - inline Index rank() const - { - using std::abs; - eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); - if(m_singularValues.size()==0) return 0; - RealScalar premultiplied_threshold = m_singularValues.coeff(0) * threshold(); - Index i = m_nonzeroSingularValues-1; - while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i; - return i+1; - } + using Base::computeU; + using Base::computeV; - /** Allows to prescribe a threshold to be used by certain methods, such as rank() and solve(), - * which need to determine when singular values are to be considered nonzero. - * This is not used for the SVD decomposition itself. - * - * When it needs to get the threshold value, Eigen calls threshold(). - * The default is \c NumTraits::epsilon() - * - * \param threshold The new value to use as the threshold. - * - * A singular value will be considered nonzero if its value is strictly greater than - * \f$ \vert singular value \vert \leqslant threshold \times \vert max singular value \vert \f$. - * - * If you want to come back to the default behavior, call setThreshold(Default_t) - */ - JacobiSVD& setThreshold(const RealScalar& threshold) - { - m_usePrescribedThreshold = true; - m_prescribedThreshold = threshold; - return *this; - } - - /** Allows to come back to the default behavior, letting Eigen use its default formula for - * determining the threshold. - * - * You should pass the special object Eigen::Default as parameter here. - * \code svd.setThreshold(Eigen::Default); \endcode - * - * See the documentation of setThreshold(const RealScalar&). - */ - JacobiSVD& setThreshold(Default_t) - { - m_usePrescribedThreshold = false; - return *this; - } - - /** Returns the threshold that will be used by certain methods such as rank(). - * - * See the documentation of setThreshold(const RealScalar&). - */ - RealScalar threshold() const - { - eigen_assert(m_isInitialized || m_usePrescribedThreshold); - return m_usePrescribedThreshold ? 
m_prescribedThreshold - : (std::max)(1,m_diagSize)*NumTraits::epsilon(); - } - - inline Index rows() const { return m_rows; } - inline Index cols() const { return m_cols; } - private: void allocate(Index rows, Index cols, unsigned int computationOptions); protected: - MatrixUType m_matrixU; - MatrixVType m_matrixV; - SingularValuesType m_singularValues; + using Base::m_matrixU; + using Base::m_matrixV; + using Base::m_singularValues; + using Base::m_isInitialized; + using Base::m_isAllocated; + using Base::m_usePrescribedThreshold; + using Base::m_computeFullU; + using Base::m_computeThinU; + using Base::m_computeFullV; + using Base::m_computeThinV; + using Base::m_computationOptions; + using Base::m_nonzeroSingularValues; + using Base::m_rows; + using Base::m_cols; + using Base::m_diagSize; + using Base::m_prescribedThreshold; WorkMatrixType m_workMatrix; - bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold; - bool m_computeFullU, m_computeThinU; - bool m_computeFullV, m_computeThinV; - unsigned int m_computationOptions; - Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; - RealScalar m_prescribedThreshold; template friend struct internal::svd_precondition_2x2_block_to_be_real; diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h new file mode 100644 index 000000000..61b01fb8a --- /dev/null +++ b/Eigen/src/SVD/SVDBase.h @@ -0,0 +1,263 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Benoit Jacob +// Copyright (C) 2014 Gael Guennebaud +// +// Copyright (C) 2013 Gauthier Brun +// Copyright (C) 2013 Nicolas Carre +// Copyright (C) 2013 Jean Ceccato +// Copyright (C) 2013 Pierre Zoppitelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SVDBASE_H +#define EIGEN_SVDBASE_H + +namespace Eigen { +/** \ingroup SVD_Module + * + * + * \class SVDBase + * + * \brief Base class of SVD algorithms + * + * \tparam Derived the type of the actual SVD decomposition + * + * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product + * \f[ A = U S V^* \f] + * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal; + * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left + * and right \em singular \em vectors of \a A respectively. + * + * Singular values are always sorted in decreasing order. + * + * + * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the + * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual + * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix, + * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving. + * + * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to + * terminate in finite (and reasonable) time. 
+ * \sa MatrixBase::genericSvd() + */ +template +class SVDBase +{ + +public: + typedef typename internal::traits::MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::Index Index; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime), + MatrixOptions = MatrixType::Options + }; + + typedef Matrix MatrixUType; + typedef Matrix MatrixVType; + typedef typename internal::plain_diag_type::type SingularValuesType; + + Derived& derived() { return *static_cast(this); } + const Derived& derived() const { return *static_cast(this); } + + /** \returns the \a U matrix. + * + * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, + * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU. + * + * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed. + * + * This method asserts that you asked for \a U to be computed. + */ + const MatrixUType& matrixU() const + { + eigen_assert(m_isInitialized && "SVD is not initialized."); + eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?"); + return m_matrixU; + } + + /** \returns the \a V matrix. + * + * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, + * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for ComputeThinV. + * + * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed. + * + * This method asserts that you asked for \a V to be computed. + */ + const MatrixVType& matrixV() const + { + eigen_assert(m_isInitialized && "SVD is not initialized."); + eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?"); + return m_matrixV; + } + + /** \returns the vector of singular values. + * + * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the + * returned vector has size \a m. Singular values are always sorted in decreasing order. + */ + const SingularValuesType& singularValues() const + { + eigen_assert(m_isInitialized && "SVD is not initialized."); + return m_singularValues; + } + + /** \returns the number of singular values that are not exactly 0 */ + Index nonzeroSingularValues() const + { + eigen_assert(m_isInitialized && "SVD is not initialized."); + return m_nonzeroSingularValues; + } + + /** \returns the rank of the matrix of which \c *this is the SVD. + * + * \note This method has to determine which singular values should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). 
+ */ + inline Index rank() const + { + using std::abs; + eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); + if(m_singularValues.size()==0) return 0; + RealScalar premultiplied_threshold = m_singularValues.coeff(0) * threshold(); + Index i = m_nonzeroSingularValues-1; + while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i; + return i+1; + } + + /** Allows to prescribe a threshold to be used by certain methods, such as rank() and solve(), + * which need to determine when singular values are to be considered nonzero. + * This is not used for the SVD decomposition itself. + * + * When it needs to get the threshold value, Eigen calls threshold(). + * The default is \c NumTraits::epsilon() + * + * \param threshold The new value to use as the threshold. + * + * A singular value will be considered nonzero if its value is strictly greater than + * \f$ \vert singular value \vert \leqslant threshold \times \vert max singular value \vert \f$. + * + * If you want to come back to the default behavior, call setThreshold(Default_t) + */ + Derived& setThreshold(const RealScalar& threshold) + { + m_usePrescribedThreshold = true; + m_prescribedThreshold = threshold; + return derived(); + } + + /** Allows to come back to the default behavior, letting Eigen use its default formula for + * determining the threshold. + * + * You should pass the special object Eigen::Default as parameter here. + * \code svd.setThreshold(Eigen::Default); \endcode + * + * See the documentation of setThreshold(const RealScalar&). + */ + Derived& setThreshold(Default_t) + { + m_usePrescribedThreshold = false; + return derived(); + } + + /** Returns the threshold that will be used by certain methods such as rank(). + * + * See the documentation of setThreshold(const RealScalar&). + */ + RealScalar threshold() const + { + eigen_assert(m_isInitialized || m_usePrescribedThreshold); + return m_usePrescribedThreshold ? m_prescribedThreshold + : (std::max)(1,m_diagSize)*NumTraits::epsilon(); + } + + /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */ + inline bool computeU() const { return m_computeFullU || m_computeThinU; } + /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */ + inline bool computeV() const { return m_computeFullV || m_computeThinV; } + + inline Index rows() const { return m_rows; } + inline Index cols() const { return m_cols; } + +protected: + // return true if already allocated + bool allocate(Index rows, Index cols, unsigned int computationOptions) ; + + MatrixUType m_matrixU; + MatrixVType m_matrixV; + SingularValuesType m_singularValues; + bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold; + bool m_computeFullU, m_computeThinU; + bool m_computeFullV, m_computeThinV; + unsigned int m_computationOptions; + Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; + RealScalar m_prescribedThreshold; + + /** \brief Default Constructor. 
+ * + * Default constructor of SVDBase + */ + SVDBase() + : m_isInitialized(false), + m_isAllocated(false), + m_usePrescribedThreshold(false), + m_computationOptions(0), + m_rows(-1), m_cols(-1), m_diagSize(0) + {} + + +}; + + +template +bool SVDBase::allocate(Index rows, Index cols, unsigned int computationOptions) +{ + eigen_assert(rows >= 0 && cols >= 0); + + if (m_isAllocated && + rows == m_rows && + cols == m_cols && + computationOptions == m_computationOptions) + { + return true; + } + + m_rows = rows; + m_cols = cols; + m_isInitialized = false; + m_isAllocated = true; + m_computationOptions = computationOptions; + m_computeFullU = (computationOptions & ComputeFullU) != 0; + m_computeThinU = (computationOptions & ComputeThinU) != 0; + m_computeFullV = (computationOptions & ComputeFullV) != 0; + m_computeThinV = (computationOptions & ComputeThinV) != 0; + eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U"); + eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V"); + eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && + "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns."); + + m_diagSize = (std::min)(m_rows, m_cols); + m_singularValues.resize(m_diagSize); + if(RowsAtCompileTime==Dynamic) + m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0); + if(ColsAtCompileTime==Dynamic) + m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0); + + return false; +} + +}// end namespace + +#endif // EIGEN_SVDBASE_H diff --git a/Eigen/src/SVD/UpperBidiagonalization.h b/Eigen/src/SVD/UpperBidiagonalization.h index 40067682c..225b19e3c 100644 --- a/Eigen/src/SVD/UpperBidiagonalization.h +++ b/Eigen/src/SVD/UpperBidiagonalization.h @@ -2,6 +2,7 @@ // for linear algebra. // // Copyright (C) 2010 Benoit Jacob +// Copyright (C) 2013-2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed diff --git a/unsupported/Eigen/BDCSVD b/unsupported/Eigen/BDCSVD new file mode 100644 index 000000000..44649dbd0 --- /dev/null +++ b/unsupported/Eigen/BDCSVD @@ -0,0 +1,26 @@ +#ifndef EIGEN_BDCSVD_MODULE_H +#define EIGEN_BDCSVD_MODULE_H + +#include + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +/** \defgroup BDCSVD_Module BDCSVD module + * + * + * + * This module provides Divide & Conquer SVD decomposition for matrices (both real and complex). + * This decomposition is accessible via the following MatrixBase method: + * - MatrixBase::bdcSvd() + * + * \code + * #include + * \endcode + */ + +#include "src/BDCSVD/BDCSVD.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_BDCSVD_MODULE_H +/* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/unsupported/Eigen/SVD b/unsupported/Eigen/SVD deleted file mode 100644 index 900a8aa60..000000000 --- a/unsupported/Eigen/SVD +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef EIGEN_SVD_MODULE_H -#define EIGEN_SVD_MODULE_H - -#include -#include -#include - -#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" - -/** \defgroup SVD_Module SVD module - * - * - * - * This module provides SVD decomposition for matrices (both real and complex). 
- * This decomposition is accessible via the following MatrixBase method: - * - MatrixBase::jacobiSvd() - * - * \code - * #include - * \endcode - */ - -#include "../../Eigen/src/misc/Solve.h" -#include "../../Eigen/src/SVD/UpperBidiagonalization.h" -#include "src/SVD/SVDBase.h" -#include "src/SVD/JacobiSVD.h" -#include "src/SVD/BDCSVD.h" -#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT) -#include "../../Eigen/src/SVD/JacobiSVD_MKL.h" -#endif - -#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" - -#endif // EIGEN_SVD_MODULE_H -/* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/unsupported/Eigen/src/BDCSVD/BDCSVD.h b/unsupported/Eigen/src/BDCSVD/BDCSVD.h new file mode 100644 index 000000000..a7c369633 --- /dev/null +++ b/unsupported/Eigen/src/BDCSVD/BDCSVD.h @@ -0,0 +1,949 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD" +// research report written by Ming Gu and Stanley C.Eisenstat +// The code variable names correspond to the names they used in their +// report +// +// Copyright (C) 2013 Gauthier Brun +// Copyright (C) 2013 Nicolas Carre +// Copyright (C) 2013 Jean Ceccato +// Copyright (C) 2013 Pierre Zoppitelli +// Copyright (C) 2013 Jitse Niesen +// +// Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BDCSVD_H +#define EIGEN_BDCSVD_H + +#define EPSILON 0.0000000000000001 + +#define ALGOSWAP 16 + +namespace Eigen { + +template class BDCSVD; + +namespace internal { + +template +struct traits > +{ + typedef _MatrixType MatrixType; +}; + +} // end namespace internal + + +/** \ingroup SVD_Module + * + * + * \class BDCSVD + * + * \brief class Bidiagonal Divide and Conquer SVD + * + * \param MatrixType the type of the matrix of which we are computing the SVD decomposition + * We plan to have a very similar interface to JacobiSVD on this class. + * It should be used to speed up the calcul of SVD for big matrices. + */ +template +class BDCSVD : public SVDBase > +{ + typedef SVDBase Base; + +public: + using Base::rows; + using Base::cols; + + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::Index Index; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime), + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime), + MatrixOptions = MatrixType::Options + }; + + typedef Matrix + MatrixUType; + typedef Matrix + MatrixVType; + typedef typename internal::plain_diag_type::type SingularValuesType; + typedef typename internal::plain_row_type::type RowType; + typedef typename internal::plain_col_type::type ColType; + typedef Matrix MatrixX; + typedef Matrix MatrixXr; + typedef Matrix VectorType; + typedef Array ArrayXr; + + /** \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via BDCSVD::compute(const MatrixType&). 
+ */ + BDCSVD() : algoswap(ALGOSWAP), m_numIters(0) + {} + + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem size. + * \sa BDCSVD() + */ + BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0) + : algoswap(ALGOSWAP), m_numIters(0) + { + allocate(rows, cols, computationOptions); + } + + /** \brief Constructor performing the decomposition of given matrix. + * + * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. + * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non - default) FullPivHouseholderQR preconditioner. + */ + BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0) + : algoswap(ALGOSWAP), m_numIters(0) + { + compute(matrix, computationOptions); + } + + ~BDCSVD() + { + } + + /** \brief Method performing the decomposition of given matrix using custom options. + * + * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. + * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non - default) FullPivHouseholderQR preconditioner. + */ + BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions); + + /** \brief Method performing the decomposition of given matrix using current options. + * + * \param matrix the matrix to decompose + * + * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). + */ + BDCSVD& compute(const MatrixType& matrix) + { + return compute(matrix, this->m_computationOptions); + } + + void setSwitchSize(int s) + { + eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3"); + algoswap = s; + } + + + /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. + * + * \param b the right - hand - side of the equation to solve. + * + * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. + * + * \note SVD solving is implicitly least - squares. Thus, this method serves both purposes of exact solving and least - squares solving. + * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. 
+ */ + template + inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(this->m_isInitialized && "BDCSVD is not initialized."); + eigen_assert(computeU() && computeV() && + "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); + return internal::solve_retval(*this, b.derived()); + } + + + const MatrixUType& matrixU() const + { + eigen_assert(this->m_isInitialized && "SVD is not initialized."); + if (isTranspose){ + eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. Did you ask for it?"); + return this->m_matrixV; + } + else + { + eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?"); + return this->m_matrixU; + } + + } + + + const MatrixVType& matrixV() const + { + eigen_assert(this->m_isInitialized && "SVD is not initialized."); + if (isTranspose){ + eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?"); + return this->m_matrixU; + } + else + { + eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. Did you ask for it?"); + return this->m_matrixV; + } + } + + using Base::computeU; + using Base::computeV; + +private: + void allocate(Index rows, Index cols, unsigned int computationOptions); + void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift); + void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V); + void computeSingVals(const ArrayXr& col0, const ArrayXr& diag, VectorType& singVals, + ArrayXr& shifts, ArrayXr& mus); + void perturbCol0(const ArrayXr& col0, const ArrayXr& diag, const VectorType& singVals, + const ArrayXr& shifts, const ArrayXr& mus, ArrayXr& zhat); + void computeSingVecs(const ArrayXr& zhat, const ArrayXr& diag, const VectorType& singVals, + const ArrayXr& shifts, const ArrayXr& mus, MatrixXr& U, MatrixXr& V); + void deflation43(Index firstCol, Index shift, Index i, Index size); + void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size); + void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift); + void copyUV(const typename internal::UpperBidiagonalization::HouseholderUSequenceType& householderU, + const typename internal::UpperBidiagonalization::HouseholderVSequenceType& householderV); + +protected: + MatrixXr m_naiveU, m_naiveV; + MatrixXr m_computed; + Index nRec; + int algoswap; + bool isTranspose, compU, compV; + +public: + int m_numIters; +}; //end class BDCSVD + + +// Methode to allocate ans initialize matrix and attributs +template +void BDCSVD::allocate(Index rows, Index cols, unsigned int computationOptions) +{ + isTranspose = (cols > rows); + if (Base::allocate(rows, cols, computationOptions)) return; + m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize ); + if (isTranspose){ + compU = this->computeU(); + compV = this->computeV(); + } + else + { + compV = this->computeU(); + compU = this->computeV(); + } + if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 ); + else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 ); + + if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize); + + + //should be changed for a cleaner implementation + if (isTranspose){ + bool aux; + if (this->computeU()||this->computeV()){ + aux = this->m_computeFullU; + this->m_computeFullU = this->m_computeFullV; + this->m_computeFullV = aux; + aux = 
this->m_computeThinU; + this->m_computeThinU = this->m_computeThinV; + this->m_computeThinV = aux; + } + } +}// end allocate + +// Methode which compute the BDCSVD for the int +template<> +BDCSVD >& BDCSVD >::compute(const MatrixType& matrix, unsigned int computationOptions) { + allocate(matrix.rows(), matrix.cols(), computationOptions); + this->m_nonzeroSingularValues = 0; + m_computed = Matrix::Zero(rows(), cols()); + for (int i=0; im_diagSize; i++) { + this->m_singularValues.coeffRef(i) = 0; + } + if (this->m_computeFullU) this->m_matrixU = Matrix::Zero(rows(), rows()); + if (this->m_computeFullV) this->m_matrixV = Matrix::Zero(cols(), cols()); + this->m_isInitialized = true; + return *this; +} + + +// Methode which compute the BDCSVD +template +BDCSVD& BDCSVD::compute(const MatrixType& matrix, unsigned int computationOptions) +{ + allocate(matrix.rows(), matrix.cols(), computationOptions); + using std::abs; + + //**** step 1 Bidiagonalization isTranspose = (matrix.cols()>matrix.rows()) ; + MatrixType copy; + if (isTranspose) copy = matrix.adjoint(); + else copy = matrix; + + internal::UpperBidiagonalization bid(copy); + + //**** step 2 Divide + m_computed.topRows(this->m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose(); + m_computed.template bottomRows<1>().setZero(); + divide(0, this->m_diagSize - 1, 0, 0, 0); + + //**** step 3 copy + for (int i=0; im_diagSize; i++) { + RealScalar a = abs(m_computed.coeff(i, i)); + this->m_singularValues.coeffRef(i) = a; + if (a == 0){ + this->m_nonzeroSingularValues = i; + this->m_singularValues.tail(this->m_diagSize - i - 1).setZero(); + break; + } + else if (i == this->m_diagSize - 1) + { + this->m_nonzeroSingularValues = i + 1; + break; + } + } + copyUV(bid.householderU(), bid.householderV()); + this->m_isInitialized = true; + return *this; +}// end compute + + +template +void BDCSVD::copyUV(const typename internal::UpperBidiagonalization::HouseholderUSequenceType& householderU, + const typename internal::UpperBidiagonalization::HouseholderVSequenceType& householderV) +{ + // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa + if (this->computeU()){ + Index Ucols = this->m_computeThinU ? this->m_nonzeroSingularValues : householderU.cols(); + this->m_matrixU = MatrixX::Identity(householderU.cols(), Ucols); + Index blockCols = this->m_computeThinU ? this->m_nonzeroSingularValues : this->m_diagSize; + this->m_matrixU.block(0, 0, this->m_diagSize, blockCols) = + m_naiveV.template cast().block(0, 0, this->m_diagSize, blockCols); + this->m_matrixU = householderU * this->m_matrixU; + } + if (this->computeV()){ + Index Vcols = this->m_computeThinV ? this->m_nonzeroSingularValues : householderV.cols(); + this->m_matrixV = MatrixX::Identity(householderV.cols(), Vcols); + Index blockCols = this->m_computeThinV ? this->m_nonzeroSingularValues : this->m_diagSize; + this->m_matrixV.block(0, 0, this->m_diagSize, blockCols) = + m_naiveU.template cast().block(0, 0, this->m_diagSize, blockCols); + this->m_matrixV = householderV * this->m_matrixV; + } +} + +// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the +// place of the submatrix we are currently working on. + +//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU; +//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU; +// lastCol + 1 - firstCol is the size of the submatrix. 
+//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W) +//@param firstRowW : Same as firstRowW with the column. +//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix +// to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper. +template +void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, + Index firstColW, Index shift) +{ + // requires nbRows = nbCols + 1; + using std::pow; + using std::sqrt; + using std::abs; + const Index n = lastCol - firstCol + 1; + const Index k = n/2; + RealScalar alphaK; + RealScalar betaK; + RealScalar r0; + RealScalar lambda, phi, c0, s0; + MatrixXr l, f; + // We use the other algorithm which is more efficient for small + // matrices. + if (n < algoswap){ + JacobiSVD b(m_computed.block(firstCol, firstCol, n + 1, n), + ComputeFullU | (ComputeFullV * compV)) ; + if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU(); + else + { + m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0); + m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n); + } + if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV(); + m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero(); + for (int i=0; i= firstCol; i--) + { + m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1); + } + // we shift q1 at the left with a factor c0 + m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0); + // last column = q1 * - s0 + m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0)); + // first column = q2 * s0 + m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) << + m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0; + // q2 *= c0 + m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0; + } + else + { + RealScalar q1 = (m_naiveU(0, firstCol + k)); + // we shift Q1 to the right + for (Index i = firstCol + k - 1; i >= firstCol; i--) + { + m_naiveU(0, i + 1) = m_naiveU(0, i); + } + // we shift q1 at the left with a factor c0 + m_naiveU(0, firstCol) = (q1 * c0); + // last column = q1 * - s0 + m_naiveU(0, lastCol + 1) = (q1 * ( - s0)); + // first column = q2 * s0 + m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0; + // q2 *= c0 + m_naiveU(1, lastCol + 1) *= c0; + m_naiveU.row(1).segment(firstCol + 1, k).setZero(); + m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero(); + } + m_computed(firstCol + shift, firstCol + shift) = r0; + m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real(); + m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real(); + + + // Second part: try to deflate singular values in combined matrix + deflation(firstCol, lastCol, k, firstRowW, firstColW, shift); + + // Third part: compute SVD of combined matrix + MatrixXr UofSVD, VofSVD; + VectorType singVals; + computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD); + if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= UofSVD; + else m_naiveU.block(0, firstCol, 2, n + 1) *= UofSVD; + if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= VofSVD; + m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero(); + m_computed.block(firstCol + shift, 
firstCol + shift, n, n).diagonal() = singVals; +}// end divide + +// Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in +// the first column and on the diagonal and has undergone deflation, so diagonal is in increasing +// order except for possibly the (0,0) entry. The computed SVD is stored U, singVals and V, except +// that if compV is false, then V is not computed. Singular values are sorted in decreasing order. +// +// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better +// handling of round-off errors, be consistent in ordering +template +void BDCSVD::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V) +{ + // TODO Get rid of these copies (?) + ArrayXr col0 = m_computed.block(firstCol, firstCol, n, 1); + ArrayXr diag = m_computed.block(firstCol, firstCol, n, n).diagonal(); + diag(0) = 0; + + // compute singular values and vectors (in decreasing order) + singVals.resize(n); + U.resize(n+1, n+1); + if (compV) V.resize(n, n); + + if (col0.hasNaN() || diag.hasNaN()) return; + + ArrayXr shifts(n), mus(n), zhat(n); + computeSingVals(col0, diag, singVals, shifts, mus); + perturbCol0(col0, diag, singVals, shifts, mus, zhat); + computeSingVecs(zhat, diag, singVals, shifts, mus, U, V); + + // Reverse order so that singular values in increased order + singVals.reverseInPlace(); + U.leftCols(n) = U.leftCols(n).rowwise().reverse().eval(); + if (compV) V = V.rowwise().reverse().eval(); +} + +template +void BDCSVD::computeSingVals(const ArrayXr& col0, const ArrayXr& diag, + VectorType& singVals, ArrayXr& shifts, ArrayXr& mus) +{ + using std::abs; + using std::swap; + + Index n = col0.size(); + for (Index k = 0; k < n; ++k) { + if (col0(k) == 0) { + // entry is deflated, so singular value is on diagonal + singVals(k) = diag(k); + mus(k) = 0; + shifts(k) = diag(k); + continue; + } + + // otherwise, use secular equation to find singular value + RealScalar left = diag(k); + RealScalar right = (k != n-1) ? 
diag(k+1) : (diag(n-1) + col0.matrix().norm()); + + // first decide whether it's closer to the left end or the right end + RealScalar mid = left + (right-left) / 2; + RealScalar fMid = 1 + (col0.square() / ((diag + mid) * (diag - mid))).sum(); + + RealScalar shift; + if (k == n-1 || fMid > 0) shift = left; + else shift = right; + + // measure everything relative to shift + ArrayXr diagShifted = diag - shift; + + // initial guess + RealScalar muPrev, muCur; + if (shift == left) { + muPrev = (right - left) * 0.1; + if (k == n-1) muCur = right - left; + else muCur = (right - left) * 0.5; + } else { + muPrev = -(right - left) * 0.1; + muCur = -(right - left) * 0.5; + } + + RealScalar fPrev = 1 + (col0.square() / ((diagShifted - muPrev) * (diag + shift + muPrev))).sum(); + RealScalar fCur = 1 + (col0.square() / ((diagShifted - muCur) * (diag + shift + muCur))).sum(); + if (abs(fPrev) < abs(fCur)) { + swap(fPrev, fCur); + swap(muPrev, muCur); + } + + // rational interpolation: fit a function of the form a / mu + b through the two previous + // iterates and use its zero to compute the next iterate + bool useBisection = false; + while (abs(muCur - muPrev) > 8 * NumTraits::epsilon() * (std::max)(abs(muCur), abs(muPrev)) && fCur != fPrev && !useBisection) { + ++m_numIters; + + RealScalar a = (fCur - fPrev) / (1/muCur - 1/muPrev); + RealScalar b = fCur - a / muCur; + + muPrev = muCur; + fPrev = fCur; + muCur = -a / b; + fCur = 1 + (col0.square() / ((diagShifted - muCur) * (diag + shift + muCur))).sum(); + + if (shift == left && (muCur < 0 || muCur > right - left)) useBisection = true; + if (shift == right && (muCur < -(right - left) || muCur > 0)) useBisection = true; + } + + // fall back on bisection method if rational interpolation did not work + if (useBisection) { + RealScalar leftShifted, rightShifted; + if (shift == left) { + leftShifted = 1e-30; + if (k == 0) rightShifted = right - left; + else rightShifted = (right - left) * 0.6; // theoretically we can take 0.5, but let's be safe + } else { + leftShifted = -(right - left) * 0.6; + rightShifted = -1e-30; + } + + RealScalar fLeft = 1 + (col0.square() / ((diagShifted - leftShifted) * (diag + shift + leftShifted))).sum(); + RealScalar fRight = 1 + (col0.square() / ((diagShifted - rightShifted) * (diag + shift + rightShifted))).sum(); + assert(fLeft * fRight < 0); + + while (rightShifted - leftShifted > 2 * NumTraits::epsilon() * (std::max)(abs(leftShifted), abs(rightShifted))) { + RealScalar midShifted = (leftShifted + rightShifted) / 2; + RealScalar fMid = 1 + (col0.square() / ((diagShifted - midShifted) * (diag + shift + midShifted))).sum(); + if (fLeft * fMid < 0) { + rightShifted = midShifted; + fRight = fMid; + } else { + leftShifted = midShifted; + fLeft = fMid; + } + } + + muCur = (leftShifted + rightShifted) / 2; + } + + singVals[k] = shift + muCur; + shifts[k] = shift; + mus[k] = muCur; + + // perturb singular value slightly if it equals diagonal entry to avoid division by zero later + // (deflation is supposed to avoid this from happening) + if (singVals[k] == left) singVals[k] *= 1 + NumTraits::epsilon(); + if (singVals[k] == right) singVals[k] *= 1 - NumTraits::epsilon(); + } +} + + +// zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1) +template +void BDCSVD::perturbCol0 + (const ArrayXr& col0, const ArrayXr& diag, const VectorType& singVals, + const ArrayXr& shifts, const ArrayXr& mus, ArrayXr& zhat) +{ + Index n = col0.size(); + for (Index k = 0; k < n; ++k) { + if (col0(k) == 0) + 
zhat(k) = 0; + else { + // see equation (3.6) + using std::sqrt; + RealScalar tmp = + sqrt( + (singVals(n-1) + diag(k)) * (mus(n-1) + (shifts(n-1) - diag(k))) + * ( + ((singVals.head(k).array() + diag(k)) * (mus.head(k) + (shifts.head(k) - diag(k)))) + / ((diag.head(k).array() + diag(k)) * (diag.head(k).array() - diag(k))) + ).prod() + * ( + ((singVals.segment(k, n-k-1).array() + diag(k)) * (mus.segment(k, n-k-1) + (shifts.segment(k, n-k-1) - diag(k)))) + / ((diag.tail(n-k-1) + diag(k)) * (diag.tail(n-k-1) - diag(k))) + ).prod() + ); + if (col0(k) > 0) zhat(k) = tmp; + else zhat(k) = -tmp; + } + } +} + +// compute singular vectors +template +void BDCSVD::computeSingVecs + (const ArrayXr& zhat, const ArrayXr& diag, const VectorType& singVals, + const ArrayXr& shifts, const ArrayXr& mus, MatrixXr& U, MatrixXr& V) +{ + Index n = zhat.size(); + for (Index k = 0; k < n; ++k) { + if (zhat(k) == 0) { + U.col(k) = VectorType::Unit(n+1, k); + if (compV) V.col(k) = VectorType::Unit(n, k); + } else { + U.col(k).head(n) = zhat / (((diag - shifts(k)) - mus(k)) * (diag + singVals[k])); + U(n,k) = 0; + U.col(k).normalize(); + + if (compV) { + V.col(k).tail(n-1) = (diag * zhat / (((diag - shifts(k)) - mus(k)) * (diag + singVals[k]))).tail(n-1); + V(0,k) = -1; + V.col(k).normalize(); + } + } + } + U.col(n) = VectorType::Unit(n+1, n); +} + + +// page 12_13 +// i >= 1, di almost null and zi non null. +// We use a rotation to zero out zi applied to the left of M +template +void BDCSVD::deflation43(Index firstCol, Index shift, Index i, Index size){ + using std::abs; + using std::sqrt; + using std::pow; + RealScalar c = m_computed(firstCol + shift, firstCol + shift); + RealScalar s = m_computed(i, firstCol + shift); + RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2)); + if (r == 0){ + m_computed(i, i)=0; + return; + } + c/=r; + s/=r; + m_computed(firstCol + shift, firstCol + shift) = r; + m_computed(i, firstCol + shift) = 0; + m_computed(i, i) = 0; + if (compU){ + m_naiveU.col(firstCol).segment(firstCol,size) = + c * m_naiveU.col(firstCol).segment(firstCol, size) - + s * m_naiveU.col(i).segment(firstCol, size) ; + + m_naiveU.col(i).segment(firstCol, size) = + (c + s*s/c) * m_naiveU.col(i).segment(firstCol, size) + + (s/c) * m_naiveU.col(firstCol).segment(firstCol,size); + } +}// end deflation 43 + + +// page 13 +// i,j >= 1, i != j and |di - dj| < epsilon * norm2(M) +// We apply two rotations to have zj = 0; +template +void BDCSVD::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){ + using std::abs; + using std::sqrt; + using std::conj; + using std::pow; + RealScalar c = m_computed(firstColm, firstColm + j - 1); + RealScalar s = m_computed(firstColm, firstColm + i - 1); + RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2)); + if (r==0){ + m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); + return; + } + c/=r; + s/=r; + m_computed(firstColm + i, firstColm) = r; + m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); + m_computed(firstColm + j, firstColm) = 0; + if (compU){ + m_naiveU.col(firstColu + i).segment(firstColu, size) = + c * m_naiveU.col(firstColu + i).segment(firstColu, size) - + s * m_naiveU.col(firstColu + j).segment(firstColu, size) ; + + m_naiveU.col(firstColu + j).segment(firstColu, size) = + (c + s*s/c) * m_naiveU.col(firstColu + j).segment(firstColu, size) + + (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size); + } + if (compV){ + m_naiveV.col(firstColW + 
i).segment(firstRowW, size - 1) = + c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) + + s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ; + + m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) = + (c + s*s/c) * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) - + (s/c) * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1); + } +}// end deflation 44 + + +// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive] +template +void BDCSVD::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){ + //condition 4.1 + using std::sqrt; + const Index length = lastCol + 1 - firstCol; + RealScalar norm1 = m_computed.block(firstCol+shift, firstCol+shift, length, 1).squaredNorm(); + RealScalar norm2 = m_computed.block(firstCol+shift, firstCol+shift, length, length).diagonal().squaredNorm(); + RealScalar EPS = 10 * NumTraits::epsilon() * sqrt(norm1 + norm2); + if (m_computed(firstCol + shift, firstCol + shift) < EPS){ + m_computed(firstCol + shift, firstCol + shift) = EPS; + } + + //condition 4.2 + for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){ + if (std::abs(m_computed(i, firstCol + shift)) < EPS){ + m_computed(i, firstCol + shift) = 0; + } + } + + //condition 4.3 + for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){ + if (m_computed(i, i) < EPS){ + deflation43(firstCol, shift, i, length); + } + } + + //condition 4.4 + + Index i=firstCol + shift + 1, j=firstCol + shift + k + 1; + //we stock the final place of each line + Index *permutation = new Index[length]; + + for (Index p =1; p < length; p++) { + if (i> firstCol + shift + k){ + permutation[p] = j; + j++; + } else if (j> lastCol + shift) + { + permutation[p] = i; + i++; + } + else + { + if (m_computed(i, i) < m_computed(j, j)){ + permutation[p] = j; + j++; + } + else + { + permutation[p] = i; + i++; + } + } + } + //we do the permutation + RealScalar aux; + //we stock the current index of each col + //and the column of each index + Index *realInd = new Index[length]; + Index *realCol = new Index[length]; + for (int pos = 0; pos< length; pos++){ + realCol[pos] = pos + firstCol + shift; + realInd[pos] = pos; + } + const Index Zero = firstCol + shift; + VectorType temp; + for (int i = 1; i < length - 1; i++){ + const Index I = i + Zero; + const Index realI = realInd[i]; + const Index j = permutation[length - i] - Zero; + const Index J = realCol[j]; + + //diag displace + aux = m_computed(I, I); + m_computed(I, I) = m_computed(J, J); + m_computed(J, J) = aux; + + //firstrow displace + aux = m_computed(I, Zero); + m_computed(I, Zero) = m_computed(J, Zero); + m_computed(J, Zero) = aux; + + // change columns + if (compU) { + temp = m_naiveU.col(I - shift).segment(firstCol, length + 1); + m_naiveU.col(I - shift).segment(firstCol, length + 1) << + m_naiveU.col(J - shift).segment(firstCol, length + 1); + m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp; + } + else + { + temp = m_naiveU.col(I - shift).segment(0, 2); + m_naiveU.col(I - shift).segment(0, 2) << + m_naiveU.col(J - shift).segment(0, 2); + m_naiveU.col(J - shift).segment(0, 2) << temp; + } + if (compV) { + const Index CWI = I + firstColW - Zero; + const Index CWJ = J + firstColW - Zero; + temp = m_naiveV.col(CWI).segment(firstRowW, length); + m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length); + m_naiveV.col(CWJ).segment(firstRowW, length) << temp; + } + + //update real pos + realCol[realI] = J; + 
realCol[j] = I; + realInd[J - Zero] = realI; + realInd[I - Zero] = j; + } + for (Index i = firstCol + shift + 1; i +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef BDCSVD<_MatrixType> BDCSVDType; + EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs) + + template void evalTo(Dest& dst) const + { + eigen_assert(rhs().rows() == dec().rows()); + // A = U S V^* + // So A^{ - 1} = V S^{ - 1} U^* + Index diagSize = (std::min)(dec().rows(), dec().cols()); + typename BDCSVDType::SingularValuesType invertedSingVals(diagSize); + Index nonzeroSingVals = dec().nonzeroSingularValues(); + invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse(); + invertedSingVals.tail(diagSize - nonzeroSingVals).setZero(); + + dst = dec().matrixV().leftCols(diagSize) + * invertedSingVals.asDiagonal() + * dec().matrixU().leftCols(diagSize).adjoint() + * rhs(); + return; + } +}; + +} //end namespace internal + + /** \svd_module + * + * \return the singular value decomposition of \c *this computed by + * BDC Algorithm + * + * \sa class BDCSVD + */ +/* +template +BDCSVD::PlainObject> +MatrixBase::bdcSvd(unsigned int computationOptions) const +{ + return BDCSVD(*this, computationOptions); +} +*/ + +} // end namespace Eigen + +#endif diff --git a/unsupported/Eigen/src/BDCSVD/CMakeLists.txt b/unsupported/Eigen/src/BDCSVD/CMakeLists.txt new file mode 100644 index 000000000..73b89ea18 --- /dev/null +++ b/unsupported/Eigen/src/BDCSVD/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_BDCSVD_SRCS "*.h") + +INSTALL(FILES + ${Eigen_BDCSVD_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}unsupported/Eigen/src/BDCSVD COMPONENT Devel + ) diff --git a/unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt b/unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt new file mode 100644 index 000000000..0bc9a46e6 --- /dev/null +++ b/unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt @@ -0,0 +1,29 @@ +TO DO LIST + + + +(optional optimization) - do all the allocations in the allocate part + - support static matrices + - return a error at compilation time when using integer matrices (int, long, std::complex, ...) + +to finish the algorithm : + -implement the last part of the algorithm as described on the reference paper. + You may find more information on that part on this paper + + -to replace the call to JacobiSVD at the end of the divide algorithm, just after the call to + deflation. + +(suggested step by step resolution) + 0) comment the call to Jacobi in the last part of the divide method and everything right after + until the end of the method. 
What is commented can be a guideline to steps 3), 4) and 6)
+ 1) solve the secular equation (characteristic equation) on the values that are not null (zi != 0 and di != 0), after the deflation,
+ which should be uncommented in the divide method
+ 2) remember the values of the singular values that are already computed (zi = 0)
+ 3) assign the singular values found in m_computed to the right places (together with the ones found in step 2))
+ in decreasing order
+ 4) set the first column to zero (except the first element) in m_computed
+ 5) compute all the singular vectors when CompV is set to true and only the left vectors when
+ CompV is set to false
+ 6) multiply naiveU and naiveV on the right by the matrices found; only naiveU when CompV is set to
+ false, /!\ if CompU is false NaiveU has only 2 rows
+ 7) delete everything commented in step 0)
diff --git a/unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt b/unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt
new file mode 100644
index 000000000..8563ddab8
--- /dev/null
+++ b/unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt
@@ -0,0 +1,21 @@
+This unsupported package implements a divide and conquer algorithm to compute the SVD.
+
+The implementation follows as closely as possible the following reference paper:
+http://www.cs.yale.edu/publications/techreports/tr933.pdf
+
+The code documentation uses the same variable names as the reference paper. The code, deflation included, is
+working, but a few things could still be optimised, as explained in the TODOBdcsvd.
+
+In the code, comments mark the line where the third step of the algorithm would go, so one could simply add a call
+to a function implementing the last part of the algorithm there, without needing any knowledge of the part we implemented.
+
+In the TODOBdcsvd we explain the main difficulty of the last part and suggest a reference paper to help solve it.
+
+The implementation has trouble with fixed-size matrices.
+
+In the current implementation, it returns matrices of zeros when asked to do an SVD on an int matrix.
+
+
+Paper for the third part:
+http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
+
diff --git a/unsupported/Eigen/src/CMakeLists.txt b/unsupported/Eigen/src/CMakeLists.txt
index 8eb2808e3..654a2327f 100644
--- a/unsupported/Eigen/src/CMakeLists.txt
+++ b/unsupported/Eigen/src/CMakeLists.txt
@@ -12,3 +12,4 @@ ADD_SUBDIRECTORY(Skyline)
 ADD_SUBDIRECTORY(SparseExtra)
 ADD_SUBDIRECTORY(KroneckerProduct)
 ADD_SUBDIRECTORY(Splines)
+ADD_SUBDIRECTORY(BDCSVD)
diff --git a/unsupported/Eigen/src/SVD/BDCSVD.h b/unsupported/Eigen/src/SVD/BDCSVD.h
deleted file mode 100644
index 80006fd60..000000000
--- a/unsupported/Eigen/src/SVD/BDCSVD.h
+++ /dev/null
@@ -1,937 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD"
-// research report written by Ming Gu and Stanley C.Eisenstat
-// The code variable names correspond to the names they used in their
-// report
-//
-// Copyright (C) 2013 Gauthier Brun
-// Copyright (C) 2013 Nicolas Carre
-// Copyright (C) 2013 Jean Ceccato
-// Copyright (C) 2013 Pierre Zoppitelli
-// Copyright (C) 2013 Jitse Niesen
-//
-// Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
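For reference, a minimal usage sketch of the class as documented above. The include path is an assumption about how BDCSVD is exposed at this revision, and since MatrixBase::bdcSvd() is still commented out in the header, the decomposition object is constructed directly:

#include <iostream>
#include <Eigen/Dense>
#include <unsupported/Eigen/SVD>   // assumption: module header that pulls in BDCSVD at this revision

int main()
{
  Eigen::MatrixXf A = Eigen::MatrixXf::Random(6, 4);
  Eigen::VectorXf b = Eigen::VectorXf::Random(6);

  // Thin U and V suffice for solving, as the solve() documentation above notes.
  Eigen::BDCSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);

  Eigen::VectorXf x = svd.solve(b);  // least-squares solution of A x = b
  std::cout << "singular values: " << svd.singularValues().transpose() << std::endl;
  std::cout << "residual norm:   " << (A * x - b).norm() << std::endl;
  return 0;
}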
- -#ifndef EIGEN_BDCSVD_H -#define EIGEN_BDCSVD_H - -#define EPSILON 0.0000000000000001 - -#define ALGOSWAP 16 - -namespace Eigen { -/** \ingroup SVD_Module - * - * - * \class BDCSVD - * - * \brief class Bidiagonal Divide and Conquer SVD - * - * \param MatrixType the type of the matrix of which we are computing the SVD decomposition - * We plan to have a very similar interface to JacobiSVD on this class. - * It should be used to speed up the calcul of SVD for big matrices. - */ -template -class BDCSVD : public SVDBase<_MatrixType> -{ - typedef SVDBase<_MatrixType> Base; - -public: - using Base::rows; - using Base::cols; - - typedef _MatrixType MatrixType; - typedef typename MatrixType::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; - enum { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime), - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime), - MatrixOptions = MatrixType::Options - }; - - typedef Matrix - MatrixUType; - typedef Matrix - MatrixVType; - typedef typename internal::plain_diag_type::type SingularValuesType; - typedef typename internal::plain_row_type::type RowType; - typedef typename internal::plain_col_type::type ColType; - typedef Matrix MatrixX; - typedef Matrix MatrixXr; - typedef Matrix VectorType; - typedef Array ArrayXr; - - /** \brief Default Constructor. - * - * The default constructor is useful in cases in which the user intends to - * perform decompositions via BDCSVD::compute(const MatrixType&). - */ - BDCSVD() - : SVDBase<_MatrixType>::SVDBase(), - algoswap(ALGOSWAP), m_numIters(0) - {} - - - /** \brief Default Constructor with memory preallocation - * - * Like the default constructor but with preallocation of the internal data - * according to the specified problem size. - * \sa BDCSVD() - */ - BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0) - : SVDBase<_MatrixType>::SVDBase(), - algoswap(ALGOSWAP), m_numIters(0) - { - allocate(rows, cols, computationOptions); - } - - /** \brief Constructor performing the decomposition of given matrix. - * - * \param matrix the matrix to decompose - * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. - * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, - * #ComputeFullV, #ComputeThinV. - * - * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not - * available with the (non - default) FullPivHouseholderQR preconditioner. - */ - BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0) - : SVDBase<_MatrixType>::SVDBase(), - algoswap(ALGOSWAP), m_numIters(0) - { - compute(matrix, computationOptions); - } - - ~BDCSVD() - { - } - /** \brief Method performing the decomposition of given matrix using custom options. - * - * \param matrix the matrix to decompose - * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. - * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, - * #ComputeFullV, #ComputeThinV. 
- * - * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not - * available with the (non - default) FullPivHouseholderQR preconditioner. - */ - SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions); - - /** \brief Method performing the decomposition of given matrix using current options. - * - * \param matrix the matrix to decompose - * - * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). - */ - SVDBase& compute(const MatrixType& matrix) - { - return compute(matrix, this->m_computationOptions); - } - - void setSwitchSize(int s) - { - eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3"); - algoswap = s; - } - - - /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. - * - * \param b the right - hand - side of the equation to solve. - * - * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. - * - * \note SVD solving is implicitly least - squares. Thus, this method serves both purposes of exact solving and least - squares solving. - * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. - */ - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(this->m_isInitialized && "BDCSVD is not initialized."); - eigen_assert(SVDBase<_MatrixType>::computeU() && SVDBase<_MatrixType>::computeV() && - "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); - return internal::solve_retval(*this, b.derived()); - } - - - const MatrixUType& matrixU() const - { - eigen_assert(this->m_isInitialized && "SVD is not initialized."); - if (isTranspose){ - eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. Did you ask for it?"); - return this->m_matrixV; - } - else - { - eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?"); - return this->m_matrixU; - } - - } - - - const MatrixVType& matrixV() const - { - eigen_assert(this->m_isInitialized && "SVD is not initialized."); - if (isTranspose){ - eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?"); - return this->m_matrixU; - } - else - { - eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. 
Did you ask for it?"); - return this->m_matrixV; - } - } - -private: - void allocate(Index rows, Index cols, unsigned int computationOptions); - void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift); - void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V); - void computeSingVals(const ArrayXr& col0, const ArrayXr& diag, VectorType& singVals, - ArrayXr& shifts, ArrayXr& mus); - void perturbCol0(const ArrayXr& col0, const ArrayXr& diag, const VectorType& singVals, - const ArrayXr& shifts, const ArrayXr& mus, ArrayXr& zhat); - void computeSingVecs(const ArrayXr& zhat, const ArrayXr& diag, const VectorType& singVals, - const ArrayXr& shifts, const ArrayXr& mus, MatrixXr& U, MatrixXr& V); - void deflation43(Index firstCol, Index shift, Index i, Index size); - void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size); - void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift); - void copyUV(const typename internal::UpperBidiagonalization::HouseholderUSequenceType& householderU, - const typename internal::UpperBidiagonalization::HouseholderVSequenceType& householderV); - -protected: - MatrixXr m_naiveU, m_naiveV; - MatrixXr m_computed; - Index nRec; - int algoswap; - bool isTranspose, compU, compV; - -public: - int m_numIters; -}; //end class BDCSVD - - -// Methode to allocate ans initialize matrix and attributs -template -void BDCSVD::allocate(Index rows, Index cols, unsigned int computationOptions) -{ - isTranspose = (cols > rows); - if (SVDBase::allocate(rows, cols, computationOptions)) return; - m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize ); - if (isTranspose){ - compU = this->computeU(); - compV = this->computeV(); - } - else - { - compV = this->computeU(); - compU = this->computeV(); - } - if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 ); - else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 ); - - if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize); - - - //should be changed for a cleaner implementation - if (isTranspose){ - bool aux; - if (this->computeU()||this->computeV()){ - aux = this->m_computeFullU; - this->m_computeFullU = this->m_computeFullV; - this->m_computeFullV = aux; - aux = this->m_computeThinU; - this->m_computeThinU = this->m_computeThinV; - this->m_computeThinV = aux; - } - } -}// end allocate - -// Methode which compute the BDCSVD for the int -template<> -SVDBase >& -BDCSVD >::compute(const MatrixType& matrix, unsigned int computationOptions) { - allocate(matrix.rows(), matrix.cols(), computationOptions); - this->m_nonzeroSingularValues = 0; - m_computed = Matrix::Zero(rows(), cols()); - for (int i=0; im_diagSize; i++) { - this->m_singularValues.coeffRef(i) = 0; - } - if (this->m_computeFullU) this->m_matrixU = Matrix::Zero(rows(), rows()); - if (this->m_computeFullV) this->m_matrixV = Matrix::Zero(cols(), cols()); - this->m_isInitialized = true; - return *this; -} - - -// Methode which compute the BDCSVD -template -SVDBase& -BDCSVD::compute(const MatrixType& matrix, unsigned int computationOptions) -{ - allocate(matrix.rows(), matrix.cols(), computationOptions); - using std::abs; - - //**** step 1 Bidiagonalization isTranspose = (matrix.cols()>matrix.rows()) ; - MatrixType copy; - if (isTranspose) copy = matrix.adjoint(); - else copy = matrix; - - internal::UpperBidiagonalization bid(copy); - - //**** step 2 Divide - 
m_computed.topRows(this->m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose(); - m_computed.template bottomRows<1>().setZero(); - divide(0, this->m_diagSize - 1, 0, 0, 0); - - //**** step 3 copy - for (int i=0; im_diagSize; i++) { - RealScalar a = abs(m_computed.coeff(i, i)); - this->m_singularValues.coeffRef(i) = a; - if (a == 0){ - this->m_nonzeroSingularValues = i; - this->m_singularValues.tail(this->m_diagSize - i - 1).setZero(); - break; - } - else if (i == this->m_diagSize - 1) - { - this->m_nonzeroSingularValues = i + 1; - break; - } - } - copyUV(bid.householderU(), bid.householderV()); - this->m_isInitialized = true; - return *this; -}// end compute - - -template -void BDCSVD::copyUV(const typename internal::UpperBidiagonalization::HouseholderUSequenceType& householderU, - const typename internal::UpperBidiagonalization::HouseholderVSequenceType& householderV) -{ - // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa - if (this->computeU()){ - Index Ucols = this->m_computeThinU ? this->m_nonzeroSingularValues : householderU.cols(); - this->m_matrixU = MatrixX::Identity(householderU.cols(), Ucols); - Index blockCols = this->m_computeThinU ? this->m_nonzeroSingularValues : this->m_diagSize; - this->m_matrixU.block(0, 0, this->m_diagSize, blockCols) = - m_naiveV.template cast().block(0, 0, this->m_diagSize, blockCols); - this->m_matrixU = householderU * this->m_matrixU; - } - if (this->computeV()){ - Index Vcols = this->m_computeThinV ? this->m_nonzeroSingularValues : householderV.cols(); - this->m_matrixV = MatrixX::Identity(householderV.cols(), Vcols); - Index blockCols = this->m_computeThinV ? this->m_nonzeroSingularValues : this->m_diagSize; - this->m_matrixV.block(0, 0, this->m_diagSize, blockCols) = - m_naiveU.template cast().block(0, 0, this->m_diagSize, blockCols); - this->m_matrixV = householderV * this->m_matrixV; - } -} - -// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the -// place of the submatrix we are currently working on. - -//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU; -//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU; -// lastCol + 1 - firstCol is the size of the submatrix. -//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W) -//@param firstRowW : Same as firstRowW with the column. -//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix -// to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper. -template -void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, - Index firstColW, Index shift) -{ - // requires nbRows = nbCols + 1; - using std::pow; - using std::sqrt; - using std::abs; - const Index n = lastCol - firstCol + 1; - const Index k = n/2; - RealScalar alphaK; - RealScalar betaK; - RealScalar r0; - RealScalar lambda, phi, c0, s0; - MatrixXr l, f; - // We use the other algorithm which is more efficient for small - // matrices. 
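The comment above marks the base case of the recursion: sub-blocks with fewer than algoswap columns are handed to JacobiSVD instead of being split further. The threshold defaults to ALGOSWAP (16) and can be raised through setSwitchSize(), which only accepts values greater than 3. A minimal sketch of that knob, under the same include assumption as the earlier usage example:

#include <Eigen/Dense>
#include <unsupported/Eigen/SVD>   // assumption, see the earlier usage sketch

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(200, 200);
  Eigen::BDCSVD<Eigen::MatrixXd> svd;   // default constructor: nothing computed yet
  svd.setSwitchSize(32);                // sub-problems with n < 32 fall back to JacobiSVD
  svd.compute(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  return svd.singularValues().size() == 200 ? 0 : 1;
}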
- if (n < algoswap){ - JacobiSVD b(m_computed.block(firstCol, firstCol, n + 1, n), - ComputeFullU | (ComputeFullV * compV)) ; - if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU(); - else - { - m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0); - m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n); - } - if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV(); - m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero(); - for (int i=0; i= firstCol; i--) - { - m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1); - } - // we shift q1 at the left with a factor c0 - m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0); - // last column = q1 * - s0 - m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0)); - // first column = q2 * s0 - m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) << - m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0; - // q2 *= c0 - m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0; - } - else - { - RealScalar q1 = (m_naiveU(0, firstCol + k)); - // we shift Q1 to the right - for (Index i = firstCol + k - 1; i >= firstCol; i--) - { - m_naiveU(0, i + 1) = m_naiveU(0, i); - } - // we shift q1 at the left with a factor c0 - m_naiveU(0, firstCol) = (q1 * c0); - // last column = q1 * - s0 - m_naiveU(0, lastCol + 1) = (q1 * ( - s0)); - // first column = q2 * s0 - m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0; - // q2 *= c0 - m_naiveU(1, lastCol + 1) *= c0; - m_naiveU.row(1).segment(firstCol + 1, k).setZero(); - m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero(); - } - m_computed(firstCol + shift, firstCol + shift) = r0; - m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real(); - m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real(); - - - // Second part: try to deflate singular values in combined matrix - deflation(firstCol, lastCol, k, firstRowW, firstColW, shift); - - // Third part: compute SVD of combined matrix - MatrixXr UofSVD, VofSVD; - VectorType singVals; - computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD); - if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= UofSVD; - else m_naiveU.block(0, firstCol, 2, n + 1) *= UofSVD; - if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= VofSVD; - m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero(); - m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals; -}// end divide - -// Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in -// the first column and on the diagonal and has undergone deflation, so diagonal is in increasing -// order except for possibly the (0,0) entry. The computed SVD is stored U, singVals and V, except -// that if compV is false, then V is not computed. Singular values are sorted in decreasing order. -// -// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better -// handling of round-off errors, be consistent in ordering -template -void BDCSVD::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V) -{ - // TODO Get rid of these copies (?) 
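The root finding used by computeSingVals() below solves the secular equation by rational interpolation with a bisection fallback, as described in its comments. As a standalone illustration on hypothetical toy data, independent of Eigen, the bisection branch applied to the secular function f(mu) = 1 + sum_i zi^2 / ((di - shift - mu) * (di + shift + mu)) looks like this:

#include <cstdio>
#include <vector>

// f(mu) = 1 + sum_i z_i^2 / ((d_i - shift - mu) * (d_i + shift + mu)),
// i.e. the secular function measured relative to the chosen shift.
static double secularF(const std::vector<double>& d, const std::vector<double>& z,
                       double shift, double mu)
{
  double f = 1.0;
  for (std::size_t i = 0; i < d.size(); ++i)
    f += z[i] * z[i] / ((d[i] - shift - mu) * (d[i] + shift + mu));
  return f;
}

int main()
{
  // Toy deflated data: diagonal entries d and first-column entries z (hypothetical values).
  std::vector<double> d;
  d.push_back(0.0); d.push_back(1.0); d.push_back(2.0); d.push_back(3.0);
  std::vector<double> z;
  z.push_back(0.5); z.push_back(0.4); z.push_back(0.3); z.push_back(0.2);

  // Look for the singular value between d[1] and d[2], measured relative to shift = d[1];
  // f increases from -inf to +inf on that interval, so plain bisection brackets the root.
  double shift = d[1];
  double lo = 1e-30, hi = d[2] - d[1];
  for (int it = 0; it < 60; ++it) {
    double mid = 0.5 * (lo + hi);
    if (secularF(d, z, shift, mid) > 0) hi = mid; else lo = mid;
  }
  std::printf("singular value ~ %.12f\n", shift + 0.5 * (lo + hi));
  return 0;
}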
- ArrayXr col0 = m_computed.block(firstCol, firstCol, n, 1); - ArrayXr diag = m_computed.block(firstCol, firstCol, n, n).diagonal(); - diag(0) = 0; - - // compute singular values and vectors (in decreasing order) - singVals.resize(n); - U.resize(n+1, n+1); - if (compV) V.resize(n, n); - - if (col0.hasNaN() || diag.hasNaN()) return; - - ArrayXr shifts(n), mus(n), zhat(n); - computeSingVals(col0, diag, singVals, shifts, mus); - perturbCol0(col0, diag, singVals, shifts, mus, zhat); - computeSingVecs(zhat, diag, singVals, shifts, mus, U, V); - - // Reverse order so that singular values in increased order - singVals.reverseInPlace(); - U.leftCols(n) = U.leftCols(n).rowwise().reverse().eval(); - if (compV) V = V.rowwise().reverse().eval(); -} - -template -void BDCSVD::computeSingVals(const ArrayXr& col0, const ArrayXr& diag, - VectorType& singVals, ArrayXr& shifts, ArrayXr& mus) -{ - using std::abs; - using std::swap; - - Index n = col0.size(); - for (Index k = 0; k < n; ++k) { - if (col0(k) == 0) { - // entry is deflated, so singular value is on diagonal - singVals(k) = diag(k); - mus(k) = 0; - shifts(k) = diag(k); - continue; - } - - // otherwise, use secular equation to find singular value - RealScalar left = diag(k); - RealScalar right = (k != n-1) ? diag(k+1) : (diag(n-1) + col0.matrix().norm()); - - // first decide whether it's closer to the left end or the right end - RealScalar mid = left + (right-left) / 2; - RealScalar fMid = 1 + (col0.square() / ((diag + mid) * (diag - mid))).sum(); - - RealScalar shift; - if (k == n-1 || fMid > 0) shift = left; - else shift = right; - - // measure everything relative to shift - ArrayXr diagShifted = diag - shift; - - // initial guess - RealScalar muPrev, muCur; - if (shift == left) { - muPrev = (right - left) * 0.1; - if (k == n-1) muCur = right - left; - else muCur = (right - left) * 0.5; - } else { - muPrev = -(right - left) * 0.1; - muCur = -(right - left) * 0.5; - } - - RealScalar fPrev = 1 + (col0.square() / ((diagShifted - muPrev) * (diag + shift + muPrev))).sum(); - RealScalar fCur = 1 + (col0.square() / ((diagShifted - muCur) * (diag + shift + muCur))).sum(); - if (abs(fPrev) < abs(fCur)) { - swap(fPrev, fCur); - swap(muPrev, muCur); - } - - // rational interpolation: fit a function of the form a / mu + b through the two previous - // iterates and use its zero to compute the next iterate - bool useBisection = false; - while (abs(muCur - muPrev) > 8 * NumTraits::epsilon() * (std::max)(abs(muCur), abs(muPrev)) && fCur != fPrev && !useBisection) { - ++m_numIters; - - RealScalar a = (fCur - fPrev) / (1/muCur - 1/muPrev); - RealScalar b = fCur - a / muCur; - - muPrev = muCur; - fPrev = fCur; - muCur = -a / b; - fCur = 1 + (col0.square() / ((diagShifted - muCur) * (diag + shift + muCur))).sum(); - - if (shift == left && (muCur < 0 || muCur > right - left)) useBisection = true; - if (shift == right && (muCur < -(right - left) || muCur > 0)) useBisection = true; - } - - // fall back on bisection method if rational interpolation did not work - if (useBisection) { - RealScalar leftShifted, rightShifted; - if (shift == left) { - leftShifted = 1e-30; - if (k == 0) rightShifted = right - left; - else rightShifted = (right - left) * 0.6; // theoretically we can take 0.5, but let's be safe - } else { - leftShifted = -(right - left) * 0.6; - rightShifted = -1e-30; - } - - RealScalar fLeft = 1 + (col0.square() / ((diagShifted - leftShifted) * (diag + shift + leftShifted))).sum(); - RealScalar fRight = 1 + (col0.square() / ((diagShifted - rightShifted) * (diag 
+ shift + rightShifted))).sum(); - assert(fLeft * fRight < 0); - - while (rightShifted - leftShifted > 2 * NumTraits::epsilon() * (std::max)(abs(leftShifted), abs(rightShifted))) { - RealScalar midShifted = (leftShifted + rightShifted) / 2; - RealScalar fMid = 1 + (col0.square() / ((diagShifted - midShifted) * (diag + shift + midShifted))).sum(); - if (fLeft * fMid < 0) { - rightShifted = midShifted; - fRight = fMid; - } else { - leftShifted = midShifted; - fLeft = fMid; - } - } - - muCur = (leftShifted + rightShifted) / 2; - } - - singVals[k] = shift + muCur; - shifts[k] = shift; - mus[k] = muCur; - - // perturb singular value slightly if it equals diagonal entry to avoid division by zero later - // (deflation is supposed to avoid this from happening) - if (singVals[k] == left) singVals[k] *= 1 + NumTraits::epsilon(); - if (singVals[k] == right) singVals[k] *= 1 - NumTraits::epsilon(); - } -} - - -// zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1) -template -void BDCSVD::perturbCol0 - (const ArrayXr& col0, const ArrayXr& diag, const VectorType& singVals, - const ArrayXr& shifts, const ArrayXr& mus, ArrayXr& zhat) -{ - Index n = col0.size(); - for (Index k = 0; k < n; ++k) { - if (col0(k) == 0) - zhat(k) = 0; - else { - // see equation (3.6) - using std::sqrt; - RealScalar tmp = - sqrt( - (singVals(n-1) + diag(k)) * (mus(n-1) + (shifts(n-1) - diag(k))) - * ( - ((singVals.head(k).array() + diag(k)) * (mus.head(k) + (shifts.head(k) - diag(k)))) - / ((diag.head(k).array() + diag(k)) * (diag.head(k).array() - diag(k))) - ).prod() - * ( - ((singVals.segment(k, n-k-1).array() + diag(k)) * (mus.segment(k, n-k-1) + (shifts.segment(k, n-k-1) - diag(k)))) - / ((diag.tail(n-k-1) + diag(k)) * (diag.tail(n-k-1) - diag(k))) - ).prod() - ); - if (col0(k) > 0) zhat(k) = tmp; - else zhat(k) = -tmp; - } - } -} - -// compute singular vectors -template -void BDCSVD::computeSingVecs - (const ArrayXr& zhat, const ArrayXr& diag, const VectorType& singVals, - const ArrayXr& shifts, const ArrayXr& mus, MatrixXr& U, MatrixXr& V) -{ - Index n = zhat.size(); - for (Index k = 0; k < n; ++k) { - if (zhat(k) == 0) { - U.col(k) = VectorType::Unit(n+1, k); - if (compV) V.col(k) = VectorType::Unit(n, k); - } else { - U.col(k).head(n) = zhat / (((diag - shifts(k)) - mus(k)) * (diag + singVals[k])); - U(n,k) = 0; - U.col(k).normalize(); - - if (compV) { - V.col(k).tail(n-1) = (diag * zhat / (((diag - shifts(k)) - mus(k)) * (diag + singVals[k]))).tail(n-1); - V(0,k) = -1; - V.col(k).normalize(); - } - } - } - U.col(n) = VectorType::Unit(n+1, n); -} - - -// page 12_13 -// i >= 1, di almost null and zi non null. 
-// We use a rotation to zero out zi applied to the left of M -template -void BDCSVD::deflation43(Index firstCol, Index shift, Index i, Index size){ - using std::abs; - using std::sqrt; - using std::pow; - RealScalar c = m_computed(firstCol + shift, firstCol + shift); - RealScalar s = m_computed(i, firstCol + shift); - RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2)); - if (r == 0){ - m_computed(i, i)=0; - return; - } - c/=r; - s/=r; - m_computed(firstCol + shift, firstCol + shift) = r; - m_computed(i, firstCol + shift) = 0; - m_computed(i, i) = 0; - if (compU){ - m_naiveU.col(firstCol).segment(firstCol,size) = - c * m_naiveU.col(firstCol).segment(firstCol, size) - - s * m_naiveU.col(i).segment(firstCol, size) ; - - m_naiveU.col(i).segment(firstCol, size) = - (c + s*s/c) * m_naiveU.col(i).segment(firstCol, size) + - (s/c) * m_naiveU.col(firstCol).segment(firstCol,size); - } -}// end deflation 43 - - -// page 13 -// i,j >= 1, i != j and |di - dj| < epsilon * norm2(M) -// We apply two rotations to have zj = 0; -template -void BDCSVD::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){ - using std::abs; - using std::sqrt; - using std::conj; - using std::pow; - RealScalar c = m_computed(firstColm, firstColm + j - 1); - RealScalar s = m_computed(firstColm, firstColm + i - 1); - RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2)); - if (r==0){ - m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); - return; - } - c/=r; - s/=r; - m_computed(firstColm + i, firstColm) = r; - m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); - m_computed(firstColm + j, firstColm) = 0; - if (compU){ - m_naiveU.col(firstColu + i).segment(firstColu, size) = - c * m_naiveU.col(firstColu + i).segment(firstColu, size) - - s * m_naiveU.col(firstColu + j).segment(firstColu, size) ; - - m_naiveU.col(firstColu + j).segment(firstColu, size) = - (c + s*s/c) * m_naiveU.col(firstColu + j).segment(firstColu, size) + - (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size); - } - if (compV){ - m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) = - c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) + - s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ; - - m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) = - (c + s*s/c) * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) - - (s/c) * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1); - } -}// end deflation 44 - - -// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive] -template -void BDCSVD::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){ - //condition 4.1 - using std::sqrt; - const Index length = lastCol + 1 - firstCol; - RealScalar norm1 = m_computed.block(firstCol+shift, firstCol+shift, length, 1).squaredNorm(); - RealScalar norm2 = m_computed.block(firstCol+shift, firstCol+shift, length, length).diagonal().squaredNorm(); - RealScalar EPS = 10 * NumTraits::epsilon() * sqrt(norm1 + norm2); - if (m_computed(firstCol + shift, firstCol + shift) < EPS){ - m_computed(firstCol + shift, firstCol + shift) = EPS; - } - - //condition 4.2 - for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){ - if (std::abs(m_computed(i, firstCol + shift)) < EPS){ - m_computed(i, firstCol + shift) = 0; - } - } - - //condition 4.3 - for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){ - if (m_computed(i, i) < EPS){ - 
deflation43(firstCol, shift, i, length); - } - } - - //condition 4.4 - - Index i=firstCol + shift + 1, j=firstCol + shift + k + 1; - //we stock the final place of each line - Index *permutation = new Index[length]; - - for (Index p =1; p < length; p++) { - if (i> firstCol + shift + k){ - permutation[p] = j; - j++; - } else if (j> lastCol + shift) - { - permutation[p] = i; - i++; - } - else - { - if (m_computed(i, i) < m_computed(j, j)){ - permutation[p] = j; - j++; - } - else - { - permutation[p] = i; - i++; - } - } - } - //we do the permutation - RealScalar aux; - //we stock the current index of each col - //and the column of each index - Index *realInd = new Index[length]; - Index *realCol = new Index[length]; - for (int pos = 0; pos< length; pos++){ - realCol[pos] = pos + firstCol + shift; - realInd[pos] = pos; - } - const Index Zero = firstCol + shift; - VectorType temp; - for (int i = 1; i < length - 1; i++){ - const Index I = i + Zero; - const Index realI = realInd[i]; - const Index j = permutation[length - i] - Zero; - const Index J = realCol[j]; - - //diag displace - aux = m_computed(I, I); - m_computed(I, I) = m_computed(J, J); - m_computed(J, J) = aux; - - //firstrow displace - aux = m_computed(I, Zero); - m_computed(I, Zero) = m_computed(J, Zero); - m_computed(J, Zero) = aux; - - // change columns - if (compU) { - temp = m_naiveU.col(I - shift).segment(firstCol, length + 1); - m_naiveU.col(I - shift).segment(firstCol, length + 1) << - m_naiveU.col(J - shift).segment(firstCol, length + 1); - m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp; - } - else - { - temp = m_naiveU.col(I - shift).segment(0, 2); - m_naiveU.col(I - shift).segment(0, 2) << - m_naiveU.col(J - shift).segment(0, 2); - m_naiveU.col(J - shift).segment(0, 2) << temp; - } - if (compV) { - const Index CWI = I + firstColW - Zero; - const Index CWJ = J + firstColW - Zero; - temp = m_naiveV.col(CWI).segment(firstRowW, length); - m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length); - m_naiveV.col(CWJ).segment(firstRowW, length) << temp; - } - - //update real pos - realCol[realI] = J; - realCol[j] = I; - realInd[J - Zero] = realI; - realInd[I - Zero] = j; - } - for (Index i = firstCol + shift + 1; i -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef BDCSVD<_MatrixType> BDCSVDType; - EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs) - - template void evalTo(Dest& dst) const - { - eigen_assert(rhs().rows() == dec().rows()); - // A = U S V^* - // So A^{ - 1} = V S^{ - 1} U^* - Index diagSize = (std::min)(dec().rows(), dec().cols()); - typename BDCSVDType::SingularValuesType invertedSingVals(diagSize); - Index nonzeroSingVals = dec().nonzeroSingularValues(); - invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse(); - invertedSingVals.tail(diagSize - nonzeroSingVals).setZero(); - - dst = dec().matrixV().leftCols(diagSize) - * invertedSingVals.asDiagonal() - * dec().matrixU().leftCols(diagSize).adjoint() - * rhs(); - return; - } -}; - -} //end namespace internal - - /** \svd_module - * - * \return the singular value decomposition of \c *this computed by - * BDC Algorithm - * - * \sa class BDCSVD - */ -/* -template -BDCSVD::PlainObject> -MatrixBase::bdcSvd(unsigned int computationOptions) const -{ - return BDCSVD(*this, computationOptions); -} -*/ - -} // end namespace Eigen - -#endif diff --git a/unsupported/Eigen/src/SVD/CMakeLists.txt b/unsupported/Eigen/src/SVD/CMakeLists.txt deleted file mode 100644 index 
b40baf092..000000000 --- a/unsupported/Eigen/src/SVD/CMakeLists.txt +++ /dev/null @@ -1,6 +0,0 @@ -FILE(GLOB Eigen_SVD_SRCS "*.h") - -INSTALL(FILES - ${Eigen_SVD_SRCS} - DESTINATION ${INCLUDE_INSTALL_DIR}unsupported/Eigen/src/SVD COMPONENT Devel - ) diff --git a/unsupported/Eigen/src/SVD/JacobiSVD.h b/unsupported/Eigen/src/SVD/JacobiSVD.h deleted file mode 100644 index 02fac409e..000000000 --- a/unsupported/Eigen/src/SVD/JacobiSVD.h +++ /dev/null @@ -1,782 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2009-2010 Benoit Jacob -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_JACOBISVD_H -#define EIGEN_JACOBISVD_H - -namespace Eigen { - -namespace internal { -// forward declaration (needed by ICC) -// the empty body is required by MSVC -template::IsComplex> -struct svd_precondition_2x2_block_to_be_real {}; - -/*** QR preconditioners (R-SVD) - *** - *** Their role is to reduce the problem of computing the SVD to the case of a square matrix. - *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for - *** JacobiSVD which by itself is only able to work on square matrices. - ***/ - -enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols }; - -template -struct qr_preconditioner_should_do_anything -{ - enum { a = MatrixType::RowsAtCompileTime != Dynamic && - MatrixType::ColsAtCompileTime != Dynamic && - MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime, - b = MatrixType::RowsAtCompileTime != Dynamic && - MatrixType::ColsAtCompileTime != Dynamic && - MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime, - ret = !( (QRPreconditioner == NoQRPreconditioner) || - (Case == PreconditionIfMoreColsThanRows && bool(a)) || - (Case == PreconditionIfMoreRowsThanCols && bool(b)) ) - }; -}; - -template::ret -> struct qr_preconditioner_impl {}; - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - void allocate(const JacobiSVD&) {} - bool run(JacobiSVD&, const MatrixType&) - { - return false; - } -}; - -/*** preconditioner using FullPivHouseholderQR ***/ - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - typedef typename MatrixType::Scalar Scalar; - enum - { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime - }; - typedef Matrix WorkspaceType; - - void allocate(const JacobiSVD& svd) - { - if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) - { - m_qr.~QRType(); - ::new (&m_qr) QRType(svd.rows(), svd.cols()); - } - if (svd.m_computeFullU) m_workspace.resize(svd.rows()); - } - - bool run(JacobiSVD& svd, const MatrixType& matrix) - { - if(matrix.rows() > matrix.cols()) - { - m_qr.compute(matrix); - svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); - if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace); - if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); - return true; - } - return false; - } -private: - typedef FullPivHouseholderQR QRType; - QRType m_qr; - WorkspaceType m_workspace; -}; - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - typedef typename MatrixType::Scalar Scalar; - enum - { - 
RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - Options = MatrixType::Options - }; - typedef Matrix - TransposeTypeWithSameStorageOrder; - - void allocate(const JacobiSVD& svd) - { - if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) - { - m_qr.~QRType(); - ::new (&m_qr) QRType(svd.cols(), svd.rows()); - } - m_adjoint.resize(svd.cols(), svd.rows()); - if (svd.m_computeFullV) m_workspace.resize(svd.cols()); - } - - bool run(JacobiSVD& svd, const MatrixType& matrix) - { - if(matrix.cols() > matrix.rows()) - { - m_adjoint = matrix.adjoint(); - m_qr.compute(m_adjoint); - svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); - if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace); - if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); - return true; - } - else return false; - } -private: - typedef FullPivHouseholderQR QRType; - QRType m_qr; - TransposeTypeWithSameStorageOrder m_adjoint; - typename internal::plain_row_type::type m_workspace; -}; - -/*** preconditioner using ColPivHouseholderQR ***/ - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - - void allocate(const JacobiSVD& svd) - { - if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) - { - m_qr.~QRType(); - ::new (&m_qr) QRType(svd.rows(), svd.cols()); - } - if (svd.m_computeFullU) m_workspace.resize(svd.rows()); - else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); - } - - bool run(JacobiSVD& svd, const MatrixType& matrix) - { - if(matrix.rows() > matrix.cols()) - { - m_qr.compute(matrix); - svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); - if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); - else if(svd.m_computeThinU) - { - svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); - m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); - } - if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); - return true; - } - return false; - } - -private: - typedef ColPivHouseholderQR QRType; - QRType m_qr; - typename internal::plain_col_type::type m_workspace; -}; - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - typedef typename MatrixType::Scalar Scalar; - enum - { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - Options = MatrixType::Options - }; - - typedef Matrix - TransposeTypeWithSameStorageOrder; - - void allocate(const JacobiSVD& svd) - { - if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) - { - m_qr.~QRType(); - ::new (&m_qr) QRType(svd.cols(), svd.rows()); - } - if (svd.m_computeFullV) m_workspace.resize(svd.cols()); - else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); - m_adjoint.resize(svd.cols(), svd.rows()); - } - - bool run(JacobiSVD& svd, const MatrixType& matrix) - { - if(matrix.cols() > matrix.rows()) - { - m_adjoint = matrix.adjoint(); - m_qr.compute(m_adjoint); - - svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); - if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); - 
else if(svd.m_computeThinV) - { - svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); - m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); - } - if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); - return true; - } - else return false; - } - -private: - typedef ColPivHouseholderQR QRType; - QRType m_qr; - TransposeTypeWithSameStorageOrder m_adjoint; - typename internal::plain_row_type::type m_workspace; -}; - -/*** preconditioner using HouseholderQR ***/ - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - - void allocate(const JacobiSVD& svd) - { - if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) - { - m_qr.~QRType(); - ::new (&m_qr) QRType(svd.rows(), svd.cols()); - } - if (svd.m_computeFullU) m_workspace.resize(svd.rows()); - else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); - } - - bool run(JacobiSVD& svd, const MatrixType& matrix) - { - if(matrix.rows() > matrix.cols()) - { - m_qr.compute(matrix); - svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); - if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); - else if(svd.m_computeThinU) - { - svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); - m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); - } - if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols()); - return true; - } - return false; - } -private: - typedef HouseholderQR QRType; - QRType m_qr; - typename internal::plain_col_type::type m_workspace; -}; - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - typedef typename MatrixType::Scalar Scalar; - enum - { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - Options = MatrixType::Options - }; - - typedef Matrix - TransposeTypeWithSameStorageOrder; - - void allocate(const JacobiSVD& svd) - { - if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) - { - m_qr.~QRType(); - ::new (&m_qr) QRType(svd.cols(), svd.rows()); - } - if (svd.m_computeFullV) m_workspace.resize(svd.cols()); - else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); - m_adjoint.resize(svd.cols(), svd.rows()); - } - - bool run(JacobiSVD& svd, const MatrixType& matrix) - { - if(matrix.cols() > matrix.rows()) - { - m_adjoint = matrix.adjoint(); - m_qr.compute(m_adjoint); - - svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); - if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); - else if(svd.m_computeThinV) - { - svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); - m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); - } - if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows()); - return true; - } - else return false; - } - -private: - typedef HouseholderQR QRType; - QRType m_qr; - TransposeTypeWithSameStorageOrder m_adjoint; - typename internal::plain_row_type::type m_workspace; -}; - -/*** 2x2 SVD implementation - *** - *** JacobiSVD consists in performing a series of 2x2 SVD subproblems - ***/ - -template -struct svd_precondition_2x2_block_to_be_real -{ - typedef JacobiSVD SVD; - typedef typename SVD::Index Index; - static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {} -}; - -template 
-struct svd_precondition_2x2_block_to_be_real -{ - typedef JacobiSVD SVD; - typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::RealScalar RealScalar; - typedef typename SVD::Index Index; - static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q) - { - using std::sqrt; - Scalar z; - JacobiRotation rot; - RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p))); - if(n==0) - { - z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); - work_matrix.row(p) *= z; - if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z); - z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); - work_matrix.row(q) *= z; - if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); - } - else - { - rot.c() = conj(work_matrix.coeff(p,p)) / n; - rot.s() = work_matrix.coeff(q,p) / n; - work_matrix.applyOnTheLeft(p,q,rot); - if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint()); - if(work_matrix.coeff(p,q) != Scalar(0)) - { - Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); - work_matrix.col(q) *= z; - if(svd.computeV()) svd.m_matrixV.col(q) *= z; - } - if(work_matrix.coeff(q,q) != Scalar(0)) - { - z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); - work_matrix.row(q) *= z; - if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); - } - } - } -}; - -template -void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, - JacobiRotation *j_left, - JacobiRotation *j_right) -{ - using std::sqrt; - Matrix m; - m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)), - numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q)); - JacobiRotation rot1; - RealScalar t = m.coeff(0,0) + m.coeff(1,1); - RealScalar d = m.coeff(1,0) - m.coeff(0,1); - if(t == RealScalar(0)) - { - rot1.c() = RealScalar(0); - rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1); - } - else - { - RealScalar u = d / t; - rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u)); - rot1.s() = rot1.c() * u; - } - m.applyOnTheLeft(0,1,rot1); - j_right->makeJacobi(m,0,1); - *j_left = rot1 * j_right->transpose(); -} - -} // end namespace internal - -/** \ingroup SVD_Module - * - * - * \class JacobiSVD - * - * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix - * - * \param MatrixType the type of the matrix of which we are computing the SVD decomposition - * \param QRPreconditioner this optional parameter allows to specify the type of QR decomposition that will be used internally - * for the R-SVD step for non-square matrices. See discussion of possible values below. - * - * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product - * \f[ A = U S V^* \f] - * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal; - * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left - * and right \em singular \em vectors of \a A respectively. - * - * Singular values are always sorted in decreasing order. - * - * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly. - * - * You can ask for only \em thin \a U or \a V to be computed, meaning the following. 
In case of a rectangular n-by-p matrix, letting \a m be the - * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual - * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix, - * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving. - * - * Here's an example demonstrating basic usage: - * \include JacobiSVD_basic.cpp - * Output: \verbinclude JacobiSVD_basic.out - * - * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than - * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and - * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms. - * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension. - * - * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to - * terminate in finite (and reasonable) time. - * - * The possible values for QRPreconditioner are: - * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR. - * \li FullPivHouseholderQRPreconditioner, is the safest and slowest. It uses full-pivoting QR. - * Contrary to other QRs, it doesn't allow computing thin unitaries. - * \li HouseholderQRPreconditioner is the fastest, and less safe and accurate than the pivoting variants. It uses non-pivoting QR. - * This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization - * is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterarive - * process is more reliable than the optimized bidiagonal SVD iterations. - * \li NoQRPreconditioner allows not to use a QR preconditioner at all. This is useful if you know that you will only be computing - * JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in - * faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking - * if QR preconditioning is needed before applying it anyway. 
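 *
 * A minimal usage sketch of the preconditioner choice, assuming the interface declared below
 * (matrix types and computation options are the usual Eigen ones):
 * \code
 * MatrixXf A = MatrixXf::Random(6,3);
 * // default: column-pivoting QR preconditioner, thin unitaries allowed
 * JacobiSVD<MatrixXf> svd1(A, ComputeThinU | ComputeThinV);
 * // safest (and slowest): full-pivoting QR; only full unitaries are supported
 * JacobiSVD<MatrixXf, FullPivHouseholderQRPreconditioner> svd2(A, ComputeFullU | ComputeFullV);
 * // square matrices only: no preconditioner is instantiated at all
 * JacobiSVD<Matrix3f, NoQRPreconditioner> svd3(Matrix3f::Random(), ComputeFullU | ComputeFullV);
 * \endcode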
- * - * \sa MatrixBase::jacobiSvd() - */ -template -class JacobiSVD : public SVDBase<_MatrixType> -{ - public: - - typedef _MatrixType MatrixType; - typedef typename MatrixType::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; - enum { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime), - MatrixOptions = MatrixType::Options - }; - - typedef Matrix - MatrixUType; - typedef Matrix - MatrixVType; - typedef typename internal::plain_diag_type::type SingularValuesType; - typedef typename internal::plain_row_type::type RowType; - typedef typename internal::plain_col_type::type ColType; - typedef Matrix - WorkMatrixType; - - /** \brief Default Constructor. - * - * The default constructor is useful in cases in which the user intends to - * perform decompositions via JacobiSVD::compute(const MatrixType&). - */ - JacobiSVD() - : SVDBase<_MatrixType>::SVDBase() - {} - - - /** \brief Default Constructor with memory preallocation - * - * Like the default constructor but with preallocation of the internal data - * according to the specified problem size. - * \sa JacobiSVD() - */ - JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0) - : SVDBase<_MatrixType>::SVDBase() - { - allocate(rows, cols, computationOptions); - } - - /** \brief Constructor performing the decomposition of given matrix. - * - * \param matrix the matrix to decompose - * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. - * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, - * #ComputeFullV, #ComputeThinV. - * - * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not - * available with the (non-default) FullPivHouseholderQR preconditioner. - */ - JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0) - : SVDBase<_MatrixType>::SVDBase() - { - compute(matrix, computationOptions); - } - - /** \brief Method performing the decomposition of given matrix using custom options. - * - * \param matrix the matrix to decompose - * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. - * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, - * #ComputeFullV, #ComputeThinV. - * - * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not - * available with the (non-default) FullPivHouseholderQR preconditioner. - */ - SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions); - - /** \brief Method performing the decomposition of given matrix using current options. - * - * \param matrix the matrix to decompose - * - * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). 
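 *
 * A minimal sketch of this reuse pattern, with the options fixed at construction time
 * (sizes and matrix names are illustrative):
 * \code
 * JacobiSVD<MatrixXf> svd(100, 60, ComputeThinU | ComputeThinV); // preallocates internal data
 * svd.compute(A1); // decomposes A1 with ComputeThinU | ComputeThinV
 * svd.compute(A2); // same options; no reallocation as long as the problem size does not change
 * \endcode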
- */ - SVDBase& compute(const MatrixType& matrix) - { - return compute(matrix, this->m_computationOptions); - } - - /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. - * - * \param b the right-hand-side of the equation to solve. - * - * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. - * - * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving. - * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. - */ - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(this->m_isInitialized && "JacobiSVD is not initialized."); - eigen_assert(SVDBase::computeU() && SVDBase::computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); - return internal::solve_retval(*this, b.derived()); - } - - - - private: - void allocate(Index rows, Index cols, unsigned int computationOptions); - - protected: - WorkMatrixType m_workMatrix; - - template - friend struct internal::svd_precondition_2x2_block_to_be_real; - template - friend struct internal::qr_preconditioner_impl; - - internal::qr_preconditioner_impl m_qr_precond_morecols; - internal::qr_preconditioner_impl m_qr_precond_morerows; -}; - -template -void JacobiSVD::allocate(Index rows, Index cols, unsigned int computationOptions) -{ - if (SVDBase::allocate(rows, cols, computationOptions)) return; - - if (QRPreconditioner == FullPivHouseholderQRPreconditioner) - { - eigen_assert(!(this->m_computeThinU || this->m_computeThinV) && - "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. " - "Use the ColPivHouseholderQR preconditioner instead."); - } - - m_workMatrix.resize(this->m_diagSize, this->m_diagSize); - - if(this->m_cols>this->m_rows) m_qr_precond_morecols.allocate(*this); - if(this->m_rows>this->m_cols) m_qr_precond_morerows.allocate(*this); -} - -template -SVDBase& -JacobiSVD::compute(const MatrixType& matrix, unsigned int computationOptions) -{ - using std::abs; - allocate(matrix.rows(), matrix.cols(), computationOptions); - - // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations, - // only worsening the precision of U and V as we accumulate more rotations - const RealScalar precision = RealScalar(2) * NumTraits::epsilon(); - - // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286) - const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits::denorm_min(); - - /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */ - - if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix)) - { - m_workMatrix = matrix.block(0,0,this->m_diagSize,this->m_diagSize); - if(this->m_computeFullU) this->m_matrixU.setIdentity(this->m_rows,this->m_rows); - if(this->m_computeThinU) this->m_matrixU.setIdentity(this->m_rows,this->m_diagSize); - if(this->m_computeFullV) this->m_matrixV.setIdentity(this->m_cols,this->m_cols); - if(this->m_computeThinV) this->m_matrixV.setIdentity(this->m_cols, this->m_diagSize); - } - - /*** step 2. The main Jacobi SVD iteration. 
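 *** Each sweep below visits every off-diagonal pair (p,q). The corresponding 2x2 sub-matrix is
 *** first made real-valued by svd_precondition_2x2_block_to_be_real (a no-op for real scalars),
 *** then diagonalized by real_2x2_jacobi_svd; the resulting left and right rotations are applied
 *** to m_workMatrix and accumulated into U and V. Sweeps stop once every off-diagonal entry falls
 *** below the threshold built from `precision` and `considerAsZero` above.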
***/ - - bool finished = false; - while(!finished) - { - finished = true; - - // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix - - for(Index p = 1; p < this->m_diagSize; ++p) - { - for(Index q = 0; q < p; ++q) - { - // if this 2x2 sub-matrix is not diagonal already... - // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't - // keep us iterating forever. Similarly, small denormal numbers are considered zero. - using std::max; - RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)), - abs(m_workMatrix.coeff(q,q)))); - if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold) - { - finished = false; - - // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal - internal::svd_precondition_2x2_block_to_be_real::run(m_workMatrix, *this, p, q); - JacobiRotation j_left, j_right; - internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right); - - // accumulate resulting Jacobi rotations - m_workMatrix.applyOnTheLeft(p,q,j_left); - if(SVDBase::computeU()) this->m_matrixU.applyOnTheRight(p,q,j_left.transpose()); - - m_workMatrix.applyOnTheRight(p,q,j_right); - if(SVDBase::computeV()) this->m_matrixV.applyOnTheRight(p,q,j_right); - } - } - } - } - - /*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/ - - for(Index i = 0; i < this->m_diagSize; ++i) - { - RealScalar a = abs(m_workMatrix.coeff(i,i)); - this->m_singularValues.coeffRef(i) = a; - if(SVDBase::computeU() && (a!=RealScalar(0))) this->m_matrixU.col(i) *= this->m_workMatrix.coeff(i,i)/a; - } - - /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/ - - this->m_nonzeroSingularValues = this->m_diagSize; - for(Index i = 0; i < this->m_diagSize; i++) - { - Index pos; - RealScalar maxRemainingSingularValue = this->m_singularValues.tail(this->m_diagSize-i).maxCoeff(&pos); - if(maxRemainingSingularValue == RealScalar(0)) - { - this->m_nonzeroSingularValues = i; - break; - } - if(pos) - { - pos += i; - std::swap(this->m_singularValues.coeffRef(i), this->m_singularValues.coeffRef(pos)); - if(SVDBase::computeU()) this->m_matrixU.col(pos).swap(this->m_matrixU.col(i)); - if(SVDBase::computeV()) this->m_matrixV.col(pos).swap(this->m_matrixV.col(i)); - } - } - - this->m_isInitialized = true; - return *this; -} - -namespace internal { -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType; - EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs) - - template void evalTo(Dest& dst) const - { - eigen_assert(rhs().rows() == dec().rows()); - - // A = U S V^* - // So A^{-1} = V S^{-1} U^* - - Index diagSize = (std::min)(dec().rows(), dec().cols()); - typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize); - - Index nonzeroSingVals = dec().nonzeroSingularValues(); - invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse(); - invertedSingVals.tail(diagSize - nonzeroSingVals).setZero(); - - dst = dec().matrixV().leftCols(diagSize) - * invertedSingVals.asDiagonal() - * dec().matrixU().leftCols(diagSize).adjoint() - * rhs(); - } -}; -} // end namespace internal - -/** \svd_module - * - * \return the singular value decomposition of \c *this computed by two-sided - * Jacobi transformations. 
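 *
 * A minimal least-squares sketch (solve() requires U and V; thin unitaries suffice):
 * \code
 * MatrixXf A = MatrixXf::Random(5,3);
 * VectorXf b = VectorXf::Random(5);
 * VectorXf x = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b); // minimizes |Ax - b|
 * \endcode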
- * - * \sa class JacobiSVD - */ -template -JacobiSVD::PlainObject> -MatrixBase::jacobiSvd(unsigned int computationOptions) const -{ - return JacobiSVD(*this, computationOptions); -} - -} // end namespace Eigen - -#endif // EIGEN_JACOBISVD_H diff --git a/unsupported/Eigen/src/SVD/SVDBase.h b/unsupported/Eigen/src/SVD/SVDBase.h deleted file mode 100644 index fd8af3b8c..000000000 --- a/unsupported/Eigen/src/SVD/SVDBase.h +++ /dev/null @@ -1,236 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2009-2010 Benoit Jacob -// -// Copyright (C) 2013 Gauthier Brun -// Copyright (C) 2013 Nicolas Carre -// Copyright (C) 2013 Jean Ceccato -// Copyright (C) 2013 Pierre Zoppitelli -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_SVD_H -#define EIGEN_SVD_H - -namespace Eigen { -/** \ingroup SVD_Module - * - * - * \class SVDBase - * - * \brief Mother class of SVD classes algorithms - * - * \param MatrixType the type of the matrix of which we are computing the SVD decomposition - * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product - * \f[ A = U S V^* \f] - * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal; - * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left - * and right \em singular \em vectors of \a A respectively. - * - * Singular values are always sorted in decreasing order. - * - * - * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the - * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual - * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix, - * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving. - * - * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to - * terminate in finite (and reasonable) time. 
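 *
 * As a minimal sketch of the factorization above, with thin factors the input can be
 * reconstructed (up to round-off) as:
 * \code
 * JacobiSVD<MatrixXf> svd(A, ComputeThinU | ComputeThinV);
 * MatrixXf Arec = svd.matrixU() * svd.singularValues().asDiagonal() * svd.matrixV().adjoint();
 * \endcode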
- * \sa MatrixBase::genericSvd() - */ -template -class SVDBase -{ - -public: - typedef _MatrixType MatrixType; - typedef typename MatrixType::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; - enum { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime), - MatrixOptions = MatrixType::Options - }; - - typedef Matrix - MatrixUType; - typedef Matrix - MatrixVType; - typedef typename internal::plain_diag_type::type SingularValuesType; - typedef typename internal::plain_row_type::type RowType; - typedef typename internal::plain_col_type::type ColType; - typedef Matrix - WorkMatrixType; - - - - - /** \brief Method performing the decomposition of given matrix using custom options. - * - * \param matrix the matrix to decompose - * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. - * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, - * #ComputeFullV, #ComputeThinV. - * - * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not - * available with the (non-default) FullPivHouseholderQR preconditioner. - */ - SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions); - - /** \brief Method performing the decomposition of given matrix using current options. - * - * \param matrix the matrix to decompose - * - * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). - */ - //virtual SVDBase& compute(const MatrixType& matrix) = 0; - SVDBase& compute(const MatrixType& matrix); - - /** \returns the \a U matrix. - * - * For the SVDBase decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, - * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU. - * - * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed. - * - * This method asserts that you asked for \a U to be computed. - */ - const MatrixUType& matrixU() const - { - eigen_assert(m_isInitialized && "SVD is not initialized."); - eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?"); - return m_matrixU; - } - - /** \returns the \a V matrix. - * - * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, - * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for ComputeThinV. - * - * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed. - * - * This method asserts that you asked for \a V to be computed. - */ - const MatrixVType& matrixV() const - { - eigen_assert(m_isInitialized && "SVD is not initialized."); - eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?"); - return m_matrixV; - } - - /** \returns the vector of singular values. 
- * - * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the - * returned vector has size \a m. Singular values are always sorted in decreasing order. - */ - const SingularValuesType& singularValues() const - { - eigen_assert(m_isInitialized && "SVD is not initialized."); - return m_singularValues; - } - - - - /** \returns the number of singular values that are not exactly 0 */ - Index nonzeroSingularValues() const - { - eigen_assert(m_isInitialized && "SVD is not initialized."); - return m_nonzeroSingularValues; - } - - - /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */ - inline bool computeU() const { return m_computeFullU || m_computeThinU; } - /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */ - inline bool computeV() const { return m_computeFullV || m_computeThinV; } - - - inline Index rows() const { return m_rows; } - inline Index cols() const { return m_cols; } - - -protected: - // return true if already allocated - bool allocate(Index rows, Index cols, unsigned int computationOptions) ; - - MatrixUType m_matrixU; - MatrixVType m_matrixV; - SingularValuesType m_singularValues; - bool m_isInitialized, m_isAllocated; - bool m_computeFullU, m_computeThinU; - bool m_computeFullV, m_computeThinV; - unsigned int m_computationOptions; - Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; - - - /** \brief Default Constructor. - * - * Default constructor of SVDBase - */ - SVDBase() - : m_isInitialized(false), - m_isAllocated(false), - m_computationOptions(0), - m_rows(-1), m_cols(-1) - {} - - -}; - - -template -bool SVDBase::allocate(Index rows, Index cols, unsigned int computationOptions) -{ - eigen_assert(rows >= 0 && cols >= 0); - - if (m_isAllocated && - rows == m_rows && - cols == m_cols && - computationOptions == m_computationOptions) - { - return true; - } - - m_rows = rows; - m_cols = cols; - m_isInitialized = false; - m_isAllocated = true; - m_computationOptions = computationOptions; - m_computeFullU = (computationOptions & ComputeFullU) != 0; - m_computeThinU = (computationOptions & ComputeThinU) != 0; - m_computeFullV = (computationOptions & ComputeFullV) != 0; - m_computeThinV = (computationOptions & ComputeThinV) != 0; - eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U"); - eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V"); - eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && - "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns."); - - m_diagSize = (std::min)(m_rows, m_cols); - m_singularValues.resize(m_diagSize); - if(RowsAtCompileTime==Dynamic) - m_matrixU.resize(m_rows, m_computeFullU ? m_rows - : m_computeThinU ? m_diagSize - : 0); - if(ColsAtCompileTime==Dynamic) - m_matrixV.resize(m_cols, m_computeFullV ? m_cols - : m_computeThinV ? m_diagSize - : 0); - - return false; -} - -}// end namespace - -#endif // EIGEN_SVD_H diff --git a/unsupported/Eigen/src/SVD/TODOBdcsvd.txt b/unsupported/Eigen/src/SVD/TODOBdcsvd.txt deleted file mode 100644 index 0bc9a46e6..000000000 --- a/unsupported/Eigen/src/SVD/TODOBdcsvd.txt +++ /dev/null @@ -1,29 +0,0 @@ -TO DO LIST - - - -(optional optimization) - do all the allocations in the allocate part - - support static matrices - - return a error at compilation time when using integer matrices (int, long, std::complex, ...) 
- -to finish the algorithm : - -implement the last part of the algorithm as described on the reference paper. - You may find more information on that part on this paper - - -to replace the call to JacobiSVD at the end of the divide algorithm, just after the call to - deflation. - -(suggested step by step resolution) - 0) comment the call to Jacobi in the last part of the divide method and everything right after - until the end of the method. What is commented can be a guideline to steps 3) 4) and 6) - 1) solve the secular equation (Characteristic equation) on the values that are not null (zi!=0 and di!=0), after the deflation - wich should be uncommented in the divide method - 2) remember the values of the singular values that are already computed (zi=0) - 3) assign the singular values found in m_computed at the right places (with the ones found in step 2) ) - in decreasing order - 4) set the firstcol to zero (except the first element) in m_computed - 5) compute all the singular vectors when CompV is set to true and only the left vectors when - CompV is set to false - 6) multiply naiveU and naiveV to the right by the matrices found, only naiveU when CompV is set to - false, /!\ if CompU is false NaiveU has only 2 rows - 7) delete everything commented in step 0) diff --git a/unsupported/Eigen/src/SVD/doneInBDCSVD.txt b/unsupported/Eigen/src/SVD/doneInBDCSVD.txt deleted file mode 100644 index 8563ddab8..000000000 --- a/unsupported/Eigen/src/SVD/doneInBDCSVD.txt +++ /dev/null @@ -1,21 +0,0 @@ -This unsupported package is about a divide and conquer algorithm to compute SVD. - -The implementation follows as closely as possible the following reference paper : -http://www.cs.yale.edu/publications/techreports/tr933.pdf - -The code documentation uses the same names for variables as the reference paper. The code, deflation included, is -working but there are a few things that could be optimised as explained in the TODOBdsvd. - -In the code comments were put at the line where would be the third step of the algorithm so one could simply add the call -of a function doing the last part of the algorithm and that would not require any knowledge of the part we implemented. - -In the TODOBdcsvd we explain what is the main difficulty of the last part and suggest a reference paper to help solve it. - -The implemented has trouble with fixed size matrices. - -In the actual implementation, it returns matrices of zero when ask to do a svd on an int matrix. 
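For reference, the conquer step of the divide and conquer scheme described above amounts to finding
the roots of a so-called secular equation: in the notation of the reference paper, the singular
values sigma of the combined block are the roots of
f(sigma) = 1 + sum_i z_i^2 / ((d_i - sigma)(d_i + sigma)).
A minimal sketch of evaluating this function, under the assumption that d holds the (deflated)
diagonal entries and z the first column of the combined block (names are illustrative only):

  #include <Eigen/Core>

  // value of the secular function at sigma; its roots interlace the entries of d
  double secularEq(double sigma, const Eigen::ArrayXd& d, const Eigen::ArrayXd& z)
  {
    return 1.0 + (z.square() / ((d - sigma) * (d + sigma))).sum();
  }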
- - -Paper for the third part: -http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf - diff --git a/unsupported/test/svd_common.h b/unsupported/test/svd_common.h index b40c23a2b..6a96e7c74 100644 --- a/unsupported/test/svd_common.h +++ b/unsupported/test/svd_common.h @@ -18,7 +18,7 @@ #define EIGEN_RUNTIME_NO_MALLOC #include "main.h" -#include +#include #include -- cgit v1.2.3 From a96f3d629cfd5e562430f49c9c4d632c365e8020 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Tue, 2 Sep 2014 22:30:23 +0200 Subject: Clean bdcsvd --- unsupported/Eigen/src/BDCSVD/BDCSVD.h | 73 ++++------------------------------- 1 file changed, 7 insertions(+), 66 deletions(-) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/src/BDCSVD/BDCSVD.h b/unsupported/Eigen/src/BDCSVD/BDCSVD.h index a7c369633..829446911 100644 --- a/unsupported/Eigen/src/BDCSVD/BDCSVD.h +++ b/unsupported/Eigen/src/BDCSVD/BDCSVD.h @@ -57,6 +57,8 @@ class BDCSVD : public SVDBase > public: using Base::rows; using Base::cols; + using Base::computeU; + using Base::computeV; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; @@ -72,15 +74,10 @@ public: MatrixOptions = MatrixType::Options }; - typedef Matrix - MatrixUType; - typedef Matrix - MatrixVType; - typedef typename internal::plain_diag_type::type SingularValuesType; - typedef typename internal::plain_row_type::type RowType; - typedef typename internal::plain_col_type::type ColType; + typedef typename Base::MatrixUType MatrixUType; + typedef typename Base::MatrixVType MatrixVType; + typedef typename Base::SingularValuesType SingularValuesType; + typedef Matrix MatrixX; typedef Matrix MatrixXr; typedef Matrix VectorType; @@ -155,27 +152,6 @@ public: eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3"); algoswap = s; } - - - /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. - * - * \param b the right - hand - side of the equation to solve. - * - * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. - * - * \note SVD solving is implicitly least - squares. Thus, this method serves both purposes of exact solving and least - squares solving. - * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. - */ - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(this->m_isInitialized && "BDCSVD is not initialized."); - eigen_assert(computeU() && computeV() && - "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); - return internal::solve_retval(*this, b.derived()); - } - const MatrixUType& matrixU() const { @@ -188,11 +164,9 @@ public: { eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. 
Did you ask for it?"); return this->m_matrixU; - } - + } } - const MatrixVType& matrixV() const { eigen_assert(this->m_isInitialized && "SVD is not initialized."); @@ -206,9 +180,6 @@ public: return this->m_matrixV; } } - - using Base::computeU; - using Base::computeV; private: void allocate(Index rows, Index cols, unsigned int computationOptions); @@ -898,36 +869,6 @@ void BDCSVD::deflation(Index firstCol, Index lastCol, Index k, Index }//end deflation -namespace internal{ - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef BDCSVD<_MatrixType> BDCSVDType; - EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs) - - template void evalTo(Dest& dst) const - { - eigen_assert(rhs().rows() == dec().rows()); - // A = U S V^* - // So A^{ - 1} = V S^{ - 1} U^* - Index diagSize = (std::min)(dec().rows(), dec().cols()); - typename BDCSVDType::SingularValuesType invertedSingVals(diagSize); - Index nonzeroSingVals = dec().nonzeroSingularValues(); - invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse(); - invertedSingVals.tail(diagSize - nonzeroSingVals).setZero(); - - dst = dec().matrixV().leftCols(diagSize) - * invertedSingVals.asDiagonal() - * dec().matrixU().leftCols(diagSize).adjoint() - * rhs(); - return; - } -}; - -} //end namespace internal - /** \svd_module * * \return the singular value decomposition of \c *this computed by -- cgit v1.2.3 From c82dc227f19e75571ff4d8c47dfbd66765c8dbc5 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Wed, 3 Sep 2014 10:15:24 +0200 Subject: Cleaning in BDCSVD (formating, handling of transpose case, remove some for loops) --- Eigen/src/SVD/SVDBase.h | 1 - unsupported/Eigen/src/BDCSVD/BDCSVD.h | 441 ++++++++++++++++------------------ 2 files changed, 202 insertions(+), 240 deletions(-) (limited to 'unsupported/Eigen') diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h index a4bb97f4b..60a5aabb6 100644 --- a/Eigen/src/SVD/SVDBase.h +++ b/Eigen/src/SVD/SVDBase.h @@ -267,7 +267,6 @@ void SVDBase::_solve_impl(const RhsType &rhs, DstType &dst) const Matrix tmp; Index l_rank = rank(); - tmp.noalias() = m_matrixU.leftCols(l_rank).adjoint() * rhs; tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp; dst = m_matrixV.leftCols(l_rank) * tmp; diff --git a/unsupported/Eigen/src/BDCSVD/BDCSVD.h b/unsupported/Eigen/src/BDCSVD/BDCSVD.h index 829446911..64cee029b 100644 --- a/unsupported/Eigen/src/BDCSVD/BDCSVD.h +++ b/unsupported/Eigen/src/BDCSVD/BDCSVD.h @@ -19,10 +19,6 @@ #ifndef EIGEN_BDCSVD_H #define EIGEN_BDCSVD_H -#define EPSILON 0.0000000000000001 - -#define ALGOSWAP 16 - namespace Eigen { template class BDCSVD; @@ -88,7 +84,7 @@ public: * The default constructor is useful in cases in which the user intends to * perform decompositions via BDCSVD::compute(const MatrixType&). */ - BDCSVD() : algoswap(ALGOSWAP), m_numIters(0) + BDCSVD() : m_algoswap(16), m_numIters(0) {} @@ -99,7 +95,7 @@ public: * \sa BDCSVD() */ BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0) - : algoswap(ALGOSWAP), m_numIters(0) + : m_algoswap(16), m_numIters(0) { allocate(rows, cols, computationOptions); } @@ -115,7 +111,7 @@ public: * available with the (non - default) FullPivHouseholderQR preconditioner. 
*/ BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0) - : algoswap(ALGOSWAP), m_numIters(0) + : m_algoswap(16), m_numIters(0) { compute(matrix, computationOptions); } @@ -150,35 +146,7 @@ public: void setSwitchSize(int s) { eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3"); - algoswap = s; - } - - const MatrixUType& matrixU() const - { - eigen_assert(this->m_isInitialized && "SVD is not initialized."); - if (isTranspose){ - eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. Did you ask for it?"); - return this->m_matrixV; - } - else - { - eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?"); - return this->m_matrixU; - } - } - - const MatrixVType& matrixV() const - { - eigen_assert(this->m_isInitialized && "SVD is not initialized."); - if (isTranspose){ - eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?"); - return this->m_matrixU; - } - else - { - eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. Did you ask for it?"); - return this->m_matrixV; - } + m_algoswap = s; } private: @@ -194,15 +162,26 @@ private: void deflation43(Index firstCol, Index shift, Index i, Index size); void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size); void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift); - void copyUV(const typename internal::UpperBidiagonalization::HouseholderUSequenceType& householderU, - const typename internal::UpperBidiagonalization::HouseholderVSequenceType& householderV); + template + void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev); protected: MatrixXr m_naiveU, m_naiveV; MatrixXr m_computed; - Index nRec; - int algoswap; - bool isTranspose, compU, compV; + Index m_nRec; + int m_algoswap; + bool m_isTranspose, m_compU, m_compV; + + using Base::m_singularValues; + using Base::m_diagSize; + using Base::m_computeFullU; + using Base::m_computeFullV; + using Base::m_computeThinU; + using Base::m_computeThinV; + using Base::m_matrixU; + using Base::m_matrixV; + using Base::m_isInitialized; + using Base::m_nonzeroSingularValues; public: int m_numIters; @@ -213,50 +192,35 @@ public: template void BDCSVD::allocate(Index rows, Index cols, unsigned int computationOptions) { - isTranspose = (cols > rows); - if (Base::allocate(rows, cols, computationOptions)) return; - m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize ); - if (isTranspose){ - compU = this->computeU(); - compV = this->computeV(); - } - else - { - compV = this->computeU(); - compU = this->computeV(); - } - if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 ); - else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 ); + m_isTranspose = (cols > rows); + if (Base::allocate(rows, cols, computationOptions)) + return; - if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize); + m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize ); + m_compU = computeV(); + m_compV = computeU(); + if (m_isTranspose) + std::swap(m_compU, m_compV); - - //should be changed for a cleaner implementation - if (isTranspose){ - bool aux; - if (this->computeU()||this->computeV()){ - aux = this->m_computeFullU; - this->m_computeFullU = this->m_computeFullV; - this->m_computeFullV = aux; - aux = this->m_computeThinU; - 
this->m_computeThinU = this->m_computeThinV; - this->m_computeThinV = aux; - } - } + if (m_compU) m_naiveU = MatrixXr::Zero(m_diagSize + 1, m_diagSize + 1 ); + else m_naiveU = MatrixXr::Zero(2, m_diagSize + 1 ); + + if (m_compV) m_naiveV = MatrixXr::Zero(m_diagSize, m_diagSize); }// end allocate // Methode which compute the BDCSVD for the int template<> -BDCSVD >& BDCSVD >::compute(const MatrixType& matrix, unsigned int computationOptions) { +BDCSVD >& BDCSVD >::compute(const MatrixType& matrix, unsigned int computationOptions) +{ allocate(matrix.rows(), matrix.cols(), computationOptions); - this->m_nonzeroSingularValues = 0; + m_nonzeroSingularValues = 0; m_computed = Matrix::Zero(rows(), cols()); - for (int i=0; im_diagSize; i++) { - this->m_singularValues.coeffRef(i) = 0; - } - if (this->m_computeFullU) this->m_matrixU = Matrix::Zero(rows(), rows()); - if (this->m_computeFullV) this->m_matrixV = Matrix::Zero(cols(), cols()); - this->m_isInitialized = true; + + m_singularValues.head(m_diagSize).setZero(); + + if (m_computeFullU) m_matrixU.setZero(rows(), rows()); + if (m_computeFullV) m_matrixV.setZero(cols(), cols()); + m_isInitialized = true; return *this; } @@ -268,59 +232,62 @@ BDCSVD& BDCSVD::compute(const MatrixType& matrix, unsign allocate(matrix.rows(), matrix.cols(), computationOptions); using std::abs; - //**** step 1 Bidiagonalization isTranspose = (matrix.cols()>matrix.rows()) ; + //**** step 1 Bidiagonalization m_isTranspose = (matrix.cols()>matrix.rows()) ; MatrixType copy; - if (isTranspose) copy = matrix.adjoint(); - else copy = matrix; + if (m_isTranspose) copy = matrix.adjoint(); + else copy = matrix; internal::UpperBidiagonalization bid(copy); //**** step 2 Divide - m_computed.topRows(this->m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose(); + m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose(); m_computed.template bottomRows<1>().setZero(); - divide(0, this->m_diagSize - 1, 0, 0, 0); + divide(0, m_diagSize - 1, 0, 0, 0); //**** step 3 copy - for (int i=0; im_diagSize; i++) { + for (int i=0; im_singularValues.coeffRef(i) = a; - if (a == 0){ - this->m_nonzeroSingularValues = i; - this->m_singularValues.tail(this->m_diagSize - i - 1).setZero(); + m_singularValues.coeffRef(i) = a; + if (a == 0) + { + m_nonzeroSingularValues = i; + m_singularValues.tail(m_diagSize - i - 1).setZero(); break; } - else if (i == this->m_diagSize - 1) + else if (i == m_diagSize - 1) { - this->m_nonzeroSingularValues = i + 1; + m_nonzeroSingularValues = i + 1; break; } } - copyUV(bid.householderU(), bid.householderV()); - this->m_isInitialized = true; + if(m_isTranspose) copyUV(bid.householderV(), bid.householderU(), m_naiveV, m_naiveU); + else copyUV(bid.householderU(), bid.householderV(), m_naiveU, m_naiveV); + m_isInitialized = true; return *this; }// end compute template -void BDCSVD::copyUV(const typename internal::UpperBidiagonalization::HouseholderUSequenceType& householderU, - const typename internal::UpperBidiagonalization::HouseholderVSequenceType& householderV) +template +void BDCSVD::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV) { // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa - if (this->computeU()){ - Index Ucols = this->m_computeThinU ? this->m_nonzeroSingularValues : householderU.cols(); - this->m_matrixU = MatrixX::Identity(householderU.cols(), Ucols); - Index blockCols = this->m_computeThinU ? 
this->m_nonzeroSingularValues : this->m_diagSize; - this->m_matrixU.block(0, 0, this->m_diagSize, blockCols) = - m_naiveV.template cast().block(0, 0, this->m_diagSize, blockCols); - this->m_matrixU = householderU * this->m_matrixU; + if (computeU()) + { + Index Ucols = m_computeThinU ? m_nonzeroSingularValues : householderU.cols(); + m_matrixU = MatrixX::Identity(householderU.cols(), Ucols); + Index blockCols = m_computeThinU ? m_nonzeroSingularValues : m_diagSize; + m_matrixU.topLeftCorner(m_diagSize, blockCols) = naiveV.template cast().topLeftCorner(m_diagSize, blockCols); + m_matrixU = householderU * m_matrixU; } - if (this->computeV()){ - Index Vcols = this->m_computeThinV ? this->m_nonzeroSingularValues : householderV.cols(); - this->m_matrixV = MatrixX::Identity(householderV.cols(), Vcols); - Index blockCols = this->m_computeThinV ? this->m_nonzeroSingularValues : this->m_diagSize; - this->m_matrixV.block(0, 0, this->m_diagSize, blockCols) = - m_naiveU.template cast().block(0, 0, this->m_diagSize, blockCols); - this->m_matrixV = householderV * this->m_matrixV; + if (computeV()) + { + Index Vcols = m_computeThinV ? m_nonzeroSingularValues : householderV.cols(); + m_matrixV = MatrixX::Identity(householderV.cols(), Vcols); + Index blockCols = m_computeThinV ? m_nonzeroSingularValues : m_diagSize; + m_matrixV.topLeftCorner(m_diagSize, blockCols) = naiveU.template cast().topLeftCorner(m_diagSize, blockCols); + m_matrixV = householderV * m_matrixV; } } @@ -335,8 +302,7 @@ void BDCSVD::copyUV(const typename internal::UpperBidiagonalization< //@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix // to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper. template -void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, - Index firstColW, Index shift) +void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift) { // requires nbRows = nbCols + 1; using std::pow; @@ -351,21 +317,19 @@ void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, MatrixXr l, f; // We use the other algorithm which is more efficient for small // matrices. - if (n < algoswap){ - JacobiSVD b(m_computed.block(firstCol, firstCol, n + 1, n), - ComputeFullU | (ComputeFullV * compV)) ; - if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU(); + if (n < m_algoswap) + { + JacobiSVD b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? ComputeFullV : 0)) ; + if (m_compU) + m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU(); else { - m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0); - m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n); + m_naiveU.row(0).segment(firstCol, n + 1).real() = b.matrixU().row(0); + m_naiveU.row(1).segment(firstCol, n + 1).real() = b.matrixU().row(n); } - if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV(); + if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = b.matrixV(); m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero(); - for (int i=0; i::divide (Index firstCol, Index lastCol, Index firstRowW, // right submatrix before the left one. 
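  // The two recursive calls below handle the right (rows k+1..n) and left (rows 0..k-1)
  // bidiagonal sub-blocks independently; the row coupling them (entries alphaK and betaK)
  // is reintroduced afterwards through the c0/s0 rotation and the call to computeSVDofM.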
divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift); divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1); - if (compU) + if (m_compU) { lambda = m_naiveU(firstCol + k, firstCol + k); phi = m_naiveU(firstCol + k + 1, lastCol + 1); @@ -386,9 +350,8 @@ void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, lambda = m_naiveU(1, firstCol + k); phi = m_naiveU(0, lastCol + 1); } - r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda)) - + abs(betaK * phi) * abs(betaK * phi)); - if (compU) + r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda)) + abs(betaK * phi) * abs(betaK * phi)); + if (m_compU) { l = m_naiveU.row(firstCol + k).segment(firstCol, k); f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1); @@ -398,7 +361,7 @@ void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, l = m_naiveU.row(1).segment(firstCol, k); f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1); } - if (compV) m_naiveV(firstRowW+k, firstColW) = 1; + if (m_compV) m_naiveV(firstRowW+k, firstColW) = 1; if (r0 == 0) { c0 = 1; @@ -409,21 +372,18 @@ void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, c0 = alphaK * lambda / r0; s0 = betaK * phi / r0; } - if (compU) + if (m_compU) { MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1)); // we shiftW Q1 to the right for (Index i = firstCol + k - 1; i >= firstCol; i--) - { - m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1); - } + m_naiveU.col(i + 1).segment(firstCol, k + 1) = m_naiveU.col(i).segment(firstCol, k + 1); // we shift q1 at the left with a factor c0 - m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0); + m_naiveU.col(firstCol).segment( firstCol, k + 1) = (q1 * c0); // last column = q1 * - s0 - m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0)); + m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) = (q1 * ( - s0)); // first column = q2 * s0 - m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) << - m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0; + m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) = m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) * s0; // q2 *= c0 m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0; } @@ -432,9 +392,7 @@ void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, RealScalar q1 = (m_naiveU(0, firstCol + k)); // we shift Q1 to the right for (Index i = firstCol + k - 1; i >= firstCol; i--) - { m_naiveU(0, i + 1) = m_naiveU(0, i); - } // we shift q1 at the left with a factor c0 m_naiveU(0, firstCol) = (q1 * c0); // last column = q1 * - s0 @@ -447,8 +405,8 @@ void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero(); } m_computed(firstCol + shift, firstCol + shift) = r0; - m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real(); - m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real(); + m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) = alphaK * l.transpose().real(); + m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) = betaK * f.transpose().real(); // Second part: try to deflate singular values in combined matrix @@ -458,9 +416,9 @@ void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, MatrixXr UofSVD, VofSVD; VectorType singVals; computeSVDofM(firstCol + 
shift, n, UofSVD, singVals, VofSVD); - if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= UofSVD; - else m_naiveU.block(0, firstCol, 2, n + 1) *= UofSVD; - if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= VofSVD; + if (m_compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= UofSVD; // FIXME this requires a temporary + else m_naiveU.block(0, firstCol, 2, n + 1) *= UofSVD; // FIXME this requires a temporary, and exploit that there are 2 rows at compile time + if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n) *= VofSVD; // FIXME this requires a temporary m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero(); m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals; }// end divide @@ -468,7 +426,7 @@ void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, // Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in // the first column and on the diagonal and has undergone deflation, so diagonal is in increasing // order except for possibly the (0,0) entry. The computed SVD is stored U, singVals and V, except -// that if compV is false, then V is not computed. Singular values are sorted in decreasing order. +// that if m_compV is false, then V is not computed. Singular values are sorted in decreasing order. // // TODO Opportunities for optimization: better root finding algo, better stopping criterion, better // handling of round-off errors, be consistent in ordering @@ -483,7 +441,7 @@ void BDCSVD::computeSVDofM(Index firstCol, Index n, MatrixXr& U, Vec // compute singular values and vectors (in decreasing order) singVals.resize(n); U.resize(n+1, n+1); - if (compV) V.resize(n, n); + if (m_compV) V.resize(n, n); if (col0.hasNaN() || diag.hasNaN()) return; @@ -495,7 +453,7 @@ void BDCSVD::computeSVDofM(Index firstCol, Index n, MatrixXr& U, Vec // Reverse order so that singular values in increased order singVals.reverseInPlace(); U.leftCols(n) = U.leftCols(n).rowwise().reverse().eval(); - if (compV) V = V.rowwise().reverse().eval(); + if (m_compV) V = V.rowwise().reverse().eval(); } template @@ -504,10 +462,13 @@ void BDCSVD::computeSingVals(const ArrayXr& col0, const ArrayXr& dia { using std::abs; using std::swap; + using std::max; Index n = col0.size(); - for (Index k = 0; k < n; ++k) { - if (col0(k) == 0) { + for (Index k = 0; k < n; ++k) + { + if (col0(k) == 0) + { // entry is deflated, so singular value is on diagonal singVals(k) = diag(k); mus(k) = 0; @@ -523,27 +484,29 @@ void BDCSVD::computeSingVals(const ArrayXr& col0, const ArrayXr& dia RealScalar mid = left + (right-left) / 2; RealScalar fMid = 1 + (col0.square() / ((diag + mid) * (diag - mid))).sum(); - RealScalar shift; - if (k == n-1 || fMid > 0) shift = left; - else shift = right; + RealScalar shift = (k == n-1 || fMid > 0) ? 
left : right; // measure everything relative to shift ArrayXr diagShifted = diag - shift; // initial guess RealScalar muPrev, muCur; - if (shift == left) { + if (shift == left) + { muPrev = (right - left) * 0.1; if (k == n-1) muCur = right - left; - else muCur = (right - left) * 0.5; - } else { + else muCur = (right - left) * 0.5; + } + else + { muPrev = -(right - left) * 0.1; muCur = -(right - left) * 0.5; } RealScalar fPrev = 1 + (col0.square() / ((diagShifted - muPrev) * (diag + shift + muPrev))).sum(); RealScalar fCur = 1 + (col0.square() / ((diagShifted - muCur) * (diag + shift + muCur))).sum(); - if (abs(fPrev) < abs(fCur)) { + if (abs(fPrev) < abs(fCur)) + { swap(fPrev, fCur); swap(muPrev, muCur); } @@ -551,7 +514,8 @@ void BDCSVD::computeSingVals(const ArrayXr& col0, const ArrayXr& dia // rational interpolation: fit a function of the form a / mu + b through the two previous // iterates and use its zero to compute the next iterate bool useBisection = false; - while (abs(muCur - muPrev) > 8 * NumTraits::epsilon() * (std::max)(abs(muCur), abs(muPrev)) && fCur != fPrev && !useBisection) { + while (abs(muCur - muPrev) > 8 * NumTraits::epsilon() * (max)(abs(muCur), abs(muPrev)) && fCur != fPrev && !useBisection) + { ++m_numIters; RealScalar a = (fCur - fPrev) / (1/muCur - 1/muPrev); @@ -567,13 +531,17 @@ void BDCSVD::computeSingVals(const ArrayXr& col0, const ArrayXr& dia } // fall back on bisection method if rational interpolation did not work - if (useBisection) { + if (useBisection) + { RealScalar leftShifted, rightShifted; - if (shift == left) { + if (shift == left) + { leftShifted = 1e-30; if (k == 0) rightShifted = right - left; - else rightShifted = (right - left) * 0.6; // theoretically we can take 0.5, but let's be safe - } else { + else rightShifted = (right - left) * 0.6; // theoretically we can take 0.5, but let's be safe + } + else + { leftShifted = -(right - left) * 0.6; rightShifted = -1e-30; } @@ -582,13 +550,17 @@ void BDCSVD::computeSingVals(const ArrayXr& col0, const ArrayXr& dia RealScalar fRight = 1 + (col0.square() / ((diagShifted - rightShifted) * (diag + shift + rightShifted))).sum(); assert(fLeft * fRight < 0); - while (rightShifted - leftShifted > 2 * NumTraits::epsilon() * (std::max)(abs(leftShifted), abs(rightShifted))) { + while (rightShifted - leftShifted > 2 * NumTraits::epsilon() * (max)(abs(leftShifted), abs(rightShifted))) + { RealScalar midShifted = (leftShifted + rightShifted) / 2; RealScalar fMid = 1 + (col0.square() / ((diagShifted - midShifted) * (diag + shift + midShifted))).sum(); - if (fLeft * fMid < 0) { + if (fLeft * fMid < 0) + { rightShifted = midShifted; fRight = fMid; - } else { + } + else + { leftShifted = midShifted; fLeft = fMid; } @@ -615,13 +587,15 @@ void BDCSVD::perturbCol0 (const ArrayXr& col0, const ArrayXr& diag, const VectorType& singVals, const ArrayXr& shifts, const ArrayXr& mus, ArrayXr& zhat) { + using std::sqrt; Index n = col0.size(); - for (Index k = 0; k < n; ++k) { + for (Index k = 0; k < n; ++k) + { if (col0(k) == 0) zhat(k) = 0; - else { + else + { // see equation (3.6) - using std::sqrt; RealScalar tmp = sqrt( (singVals(n-1) + diag(k)) * (mus(n-1) + (shifts(n-1) - diag(k))) @@ -647,16 +621,21 @@ void BDCSVD::computeSingVecs const ArrayXr& shifts, const ArrayXr& mus, MatrixXr& U, MatrixXr& V) { Index n = zhat.size(); - for (Index k = 0; k < n; ++k) { - if (zhat(k) == 0) { + for (Index k = 0; k < n; ++k) + { + if (zhat(k) == 0) + { U.col(k) = VectorType::Unit(n+1, k); - if (compV) V.col(k) = VectorType::Unit(n, k); - } 
else { + if (m_compV) V.col(k) = VectorType::Unit(n, k); + } + else + { U.col(k).head(n) = zhat / (((diag - shifts(k)) - mus(k)) * (diag + singVals[k])); U(n,k) = 0; U.col(k).normalize(); - if (compV) { + if (m_compV) + { V.col(k).tail(n-1) = (diag * zhat / (((diag - shifts(k)) - mus(k)) * (diag + singVals[k]))).tail(n-1); V(0,k) = -1; V.col(k).normalize(); @@ -671,15 +650,17 @@ void BDCSVD::computeSingVecs // i >= 1, di almost null and zi non null. // We use a rotation to zero out zi applied to the left of M template -void BDCSVD::deflation43(Index firstCol, Index shift, Index i, Index size){ +void BDCSVD::deflation43(Index firstCol, Index shift, Index i, Index size) +{ using std::abs; using std::sqrt; using std::pow; RealScalar c = m_computed(firstCol + shift, firstCol + shift); RealScalar s = m_computed(i, firstCol + shift); RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2)); - if (r == 0){ - m_computed(i, i)=0; + if (r == 0) + { + m_computed(i, i) = 0; return; } c/=r; @@ -687,7 +668,8 @@ void BDCSVD::deflation43(Index firstCol, Index shift, Index i, Index m_computed(firstCol + shift, firstCol + shift) = r; m_computed(i, firstCol + shift) = 0; m_computed(i, i) = 0; - if (compU){ + if (m_compU) + { m_naiveU.col(firstCol).segment(firstCol,size) = c * m_naiveU.col(firstCol).segment(firstCol, size) - s * m_naiveU.col(i).segment(firstCol, size) ; @@ -703,7 +685,8 @@ void BDCSVD::deflation43(Index firstCol, Index shift, Index i, Index // i,j >= 1, i != j and |di - dj| < epsilon * norm2(M) // We apply two rotations to have zj = 0; template -void BDCSVD::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){ +void BDCSVD::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size) +{ using std::abs; using std::sqrt; using std::conj; @@ -711,7 +694,8 @@ void BDCSVD::deflation44(Index firstColu , Index firstColm, Index fi RealScalar c = m_computed(firstColm, firstColm + j - 1); RealScalar s = m_computed(firstColm, firstColm + i - 1); RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2)); - if (r==0){ + if (r==0) + { m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); return; } @@ -720,7 +704,8 @@ void BDCSVD::deflation44(Index firstColu , Index firstColm, Index fi m_computed(firstColm + i, firstColm) = r; m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); m_computed(firstColm + j, firstColm) = 0; - if (compU){ + if (m_compU) + { m_naiveU.col(firstColu + i).segment(firstColu, size) = c * m_naiveU.col(firstColu + i).segment(firstColu, size) - s * m_naiveU.col(firstColu + j).segment(firstColu, size) ; @@ -729,7 +714,8 @@ void BDCSVD::deflation44(Index firstColu , Index firstColm, Index fi (c + s*s/c) * m_naiveU.col(firstColu + j).segment(firstColu, size) + (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size); } - if (compV){ + if (m_compV) + { m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) = c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) + s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ; @@ -743,72 +729,56 @@ void BDCSVD::deflation44(Index firstColu , Index firstColm, Index fi // acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive] template -void BDCSVD::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){ +void BDCSVD::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index 
firstColW, Index shift) +{ //condition 4.1 using std::sqrt; + using std::abs; const Index length = lastCol + 1 - firstCol; RealScalar norm1 = m_computed.block(firstCol+shift, firstCol+shift, length, 1).squaredNorm(); RealScalar norm2 = m_computed.block(firstCol+shift, firstCol+shift, length, length).diagonal().squaredNorm(); - RealScalar EPS = 10 * NumTraits::epsilon() * sqrt(norm1 + norm2); - if (m_computed(firstCol + shift, firstCol + shift) < EPS){ - m_computed(firstCol + shift, firstCol + shift) = EPS; - } + RealScalar epsilon = 10 * NumTraits::epsilon() * sqrt(norm1 + norm2); + if (m_computed(firstCol + shift, firstCol + shift) < epsilon) + m_computed(firstCol + shift, firstCol + shift) = epsilon; //condition 4.2 - for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){ - if (std::abs(m_computed(i, firstCol + shift)) < EPS){ + for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++) + if (abs(m_computed(i, firstCol + shift)) < epsilon) m_computed(i, firstCol + shift) = 0; - } - } //condition 4.3 - for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){ - if (m_computed(i, i) < EPS){ + for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++) + if (m_computed(i, i) < epsilon) deflation43(firstCol, shift, i, length); - } - } //condition 4.4 Index i=firstCol + shift + 1, j=firstCol + shift + k + 1; //we stock the final place of each line - Index *permutation = new Index[length]; + Index *permutation = new Index[length]; // FIXME avoid repeated dynamic memory allocation - for (Index p =1; p < length; p++) { - if (i> firstCol + shift + k){ - permutation[p] = j; - j++; - } else if (j> lastCol + shift) - { - permutation[p] = i; - i++; - } - else - { - if (m_computed(i, i) < m_computed(j, j)){ - permutation[p] = j; - j++; - } - else - { - permutation[p] = i; - i++; - } - } + for (Index p =1; p < length; p++) + { + if (i> firstCol + shift + k) permutation[p] = j++; + else if (j> lastCol + shift) permutation[p] = i++; + else if (m_computed(i, i) < m_computed(j, j)) permutation[p] = j++; + else permutation[p] = i++; } //we do the permutation RealScalar aux; //we stock the current index of each col //and the column of each index - Index *realInd = new Index[length]; - Index *realCol = new Index[length]; - for (int pos = 0; pos< length; pos++){ + Index *realInd = new Index[length]; // FIXME avoid repeated dynamic memory allocation + Index *realCol = new Index[length]; // FIXME avoid repeated dynamic memory allocation + for (int pos = 0; pos< length; pos++) + { realCol[pos] = pos + firstCol + shift; realInd[pos] = pos; } const Index Zero = firstCol + shift; VectorType temp; - for (int i = 1; i < length - 1; i++){ + for (int i = 1; i < length - 1; i++) + { const Index I = i + Zero; const Index realI = realInd[i]; const Index j = permutation[length - i] - Zero; @@ -825,25 +795,25 @@ void BDCSVD::deflation(Index firstCol, Index lastCol, Index k, Index m_computed(J, Zero) = aux; // change columns - if (compU) { + if (m_compU) + { temp = m_naiveU.col(I - shift).segment(firstCol, length + 1); - m_naiveU.col(I - shift).segment(firstCol, length + 1) << - m_naiveU.col(J - shift).segment(firstCol, length + 1); - m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp; + m_naiveU.col(I - shift).segment(firstCol, length + 1) = m_naiveU.col(J - shift).segment(firstCol, length + 1); + m_naiveU.col(J - shift).segment(firstCol, length + 1) = temp; } else { temp = m_naiveU.col(I - shift).segment(0, 2); - m_naiveU.col(I - shift).segment(0, 2) << - m_naiveU.col(J - shift).segment(0, 2); - 
m_naiveU.col(J - shift).segment(0, 2) << temp; + m_naiveU.col(I - shift).template head<2>() = m_naiveU.col(J - shift).segment(0, 2); + m_naiveU.col(J - shift).template head<2>() = temp; } - if (compV) { + if (m_compV) + { const Index CWI = I + firstColW - Zero; const Index CWJ = J + firstColW - Zero; temp = m_naiveV.col(CWI).segment(firstRowW, length); - m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length); - m_naiveV.col(CWJ).segment(firstRowW, length) << temp; + m_naiveV.col(CWI).segment(firstRowW, length) = m_naiveV.col(CWJ).segment(firstRowW, length); + m_naiveV.col(CWJ).segment(firstRowW, length) = temp; } //update real pos @@ -852,20 +822,13 @@ void BDCSVD::deflation(Index firstCol, Index lastCol, Index k, Index realInd[J - Zero] = realI; realInd[I - Zero] = j; } - for (Index i = firstCol + shift + 1; i Date: Thu, 4 Sep 2014 09:17:01 +0200 Subject: Apply Householder U and V in-place. --- unsupported/Eigen/src/BDCSVD/BDCSVD.h | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/src/BDCSVD/BDCSVD.h b/unsupported/Eigen/src/BDCSVD/BDCSVD.h index 64cee029b..fcad104c0 100644 --- a/unsupported/Eigen/src/BDCSVD/BDCSVD.h +++ b/unsupported/Eigen/src/BDCSVD/BDCSVD.h @@ -279,7 +279,7 @@ void BDCSVD::copyUV(const HouseholderU &householderU, const Househol m_matrixU = MatrixX::Identity(householderU.cols(), Ucols); Index blockCols = m_computeThinU ? m_nonzeroSingularValues : m_diagSize; m_matrixU.topLeftCorner(m_diagSize, blockCols) = naiveV.template cast().topLeftCorner(m_diagSize, blockCols); - m_matrixU = householderU * m_matrixU; + householderU.applyThisOnTheLeft(m_matrixU); } if (computeV()) { @@ -287,7 +287,7 @@ void BDCSVD::copyUV(const HouseholderU &householderU, const Househol m_matrixV = MatrixX::Identity(householderV.cols(), Vcols); Index blockCols = m_computeThinV ? m_nonzeroSingularValues : m_diagSize; m_matrixV.topLeftCorner(m_diagSize, blockCols) = naiveU.template cast().topLeftCorner(m_diagSize, blockCols); - m_matrixV = householderV * m_matrixV; + householderV.applyThisOnTheLeft(m_matrixV); } } @@ -314,7 +314,7 @@ void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, RealScalar betaK; RealScalar r0; RealScalar lambda, phi, c0, s0; - MatrixXr l, f; + VectorType l, f; // We use the other algorithm which is more efficient for small // matrices. 
if (n < m_algoswap) @@ -385,7 +385,7 @@ void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, // first column = q2 * s0 m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) = m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) * s0; // q2 *= c0 - m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0; + m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0; } else { @@ -408,7 +408,6 @@ void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) = alphaK * l.transpose().real(); m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) = betaK * f.transpose().real(); - // Second part: try to deflate singular values in combined matrix deflation(firstCol, lastCol, k, firstRowW, firstColW, shift); @@ -417,7 +416,7 @@ void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, VectorType singVals; computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD); if (m_compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= UofSVD; // FIXME this requires a temporary - else m_naiveU.block(0, firstCol, 2, n + 1) *= UofSVD; // FIXME this requires a temporary, and exploit that there are 2 rows at compile time + else m_naiveU.middleCols(firstCol, n + 1) *= UofSVD; // FIXME this requires a temporary, and exploit that there are 2 rows at compile time if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n) *= VofSVD; // FIXME this requires a temporary m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero(); m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals; @@ -434,7 +433,8 @@ template void BDCSVD::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V) { // TODO Get rid of these copies (?) - ArrayXr col0 = m_computed.block(firstCol, firstCol, n, 1); + // FIXME at least preallocate them + ArrayXr col0 = m_computed.col(firstCol).segment(firstCol, n); ArrayXr diag = m_computed.block(firstCol, firstCol, n, n).diagonal(); diag(0) = 0; @@ -446,14 +446,15 @@ void BDCSVD::computeSVDofM(Index firstCol, Index n, MatrixXr& U, Vec if (col0.hasNaN() || diag.hasNaN()) return; ArrayXr shifts(n), mus(n), zhat(n); + computeSingVals(col0, diag, singVals, shifts, mus); perturbCol0(col0, diag, singVals, shifts, mus, zhat); computeSingVecs(zhat, diag, singVals, shifts, mus, U, V); // Reverse order so that singular values in increased order singVals.reverseInPlace(); - U.leftCols(n) = U.leftCols(n).rowwise().reverse().eval(); - if (m_compV) V = V.rowwise().reverse().eval(); + U.leftCols(n) = U.leftCols(n).rowwise().reverse().eval(); // FIXME this requires a temporary + if (m_compV) V = V.rowwise().reverse().eval(); // FIXME this requires a temporary } template -- cgit v1.2.3 From dacd39ea76d488133392b3abecf1c5061ba568d7 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Fri, 5 Sep 2014 17:51:46 +0200 Subject: Exploit sparse structure in naiveU and naiveV when updating them. 
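The idea in a nutshell: when computing A = A * B and the top n1 rows and the bottom rows of A each
contain many structurally zero columns, column j of A only contributes through row j of B, so the
zero columns can be skipped by packing the non-zero columns of each half together with the matching
rows of B before calling the dense product. The stand-alone sketch below illustrates the packing
scheme on plain dense square matrices; the names and the exact zero test are illustrative only, and
the actual implementation (structured_update() in BDCSVD.h, below) operates on Block expressions
and only packs when the block is large enough to amortize the extra copies.

    // Sketch of the packing idea (illustrative, not the library code).
    // Assumes A and B are square and that A's top n1 rows / bottom rows
    // each have many columns that are exactly zero.
    #include <Eigen/Dense>
    using Eigen::MatrixXd;
    using Eigen::Index;

    void structured_update_sketch(MatrixXd& A, const MatrixXd& B, Index n1)
    {
      const Index n  = A.rows();
      const Index n2 = n - n1;
      MatrixXd A1(n1, n), A2(n2, n), B1(n, n), B2(n, n);
      Index k1 = 0, k2 = 0;
      for (Index j = 0; j < n; ++j)
      {
        if ((A.col(j).head(n1).array() != 0.0).any())
        {
          A1.col(k1) = A.col(j).head(n1);   // keep column j for the top block
          B1.row(k1) = B.row(j);            // and the matching row of B
          ++k1;
        }
        if ((A.col(j).tail(n2).array() != 0.0).any())
        {
          A2.col(k2) = A.col(j).tail(n2);   // same for the bottom block
          B2.row(k2) = B.row(j);
          ++k2;
        }
      }
      // Two smaller dense products instead of one full n x n product.
      A.topRows(n1).noalias()    = A1.leftCols(k1) * B1.topRows(k1);
      A.bottomRows(n2).noalias() = A2.leftCols(k2) * B2.topRows(k2);
    }
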
--- unsupported/Eigen/src/BDCSVD/BDCSVD.h | 54 +++++++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 3 deletions(-) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/src/BDCSVD/BDCSVD.h b/unsupported/Eigen/src/BDCSVD/BDCSVD.h index fcad104c0..0167872af 100644 --- a/unsupported/Eigen/src/BDCSVD/BDCSVD.h +++ b/unsupported/Eigen/src/BDCSVD/BDCSVD.h @@ -164,6 +164,7 @@ private: void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift); template void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev); + static void structured_update(Block A, const MatrixXr &B, Index n1); protected: MatrixXr m_naiveU, m_naiveV; @@ -240,6 +241,8 @@ BDCSVD& BDCSVD::compute(const MatrixType& matrix, unsign internal::UpperBidiagonalization bid(copy); //**** step 2 Divide + m_naiveU.setZero(); + m_naiveV.setZero(); m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose(); m_computed.template bottomRows<1>().setZero(); divide(0, m_diagSize - 1, 0, 0, 0); @@ -291,6 +294,48 @@ void BDCSVD::copyUV(const HouseholderU &householderU, const Househol } } +/** \internal + * Performs A = A * B exploiting the special structure of the matrix A. Splitting A as: + * A = [A1] + * [A2] + * such that A1.rows()==n1, then we assume that at least half of the columns of A1 and A2 are zeros. + * We can thus pack them prior to the the matrix product. However, this is only worth the effort if the matrix is large + * enough. + */ +template +void BDCSVD::structured_update(Block A, const MatrixXr &B, Index n1) +{ + Index n = A.rows(); + if(n>100) + { + // If the matrices are large enough, let's exploit the sparse strucure of A by + // splitting it in half (wrt n1), and packing the non-zero columns. 
+ DenseIndex n2 = n - n1; + MatrixXr A1(n1,n), A2(n2,n), B1(n,n), B2(n,n); + Index k1=0, k2=0; + for(Index j=0; j::divide (Index firstCol, Index lastCol, Index firstRowW, MatrixXr UofSVD, VofSVD; VectorType singVals; computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD); - if (m_compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= UofSVD; // FIXME this requires a temporary - else m_naiveU.middleCols(firstCol, n + 1) *= UofSVD; // FIXME this requires a temporary, and exploit that there are 2 rows at compile time - if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n) *= VofSVD; // FIXME this requires a temporary + + if (m_compU) structured_update(m_naiveU.block(firstCol, firstCol, n + 1, n + 1), UofSVD, (n+2)/2); + else m_naiveU.middleCols(firstCol, n + 1) *= UofSVD; // FIXME this requires a temporary, and exploit that there are 2 rows at compile time + + if (m_compV) structured_update(m_naiveV.block(firstRowW, firstColW, n, n), VofSVD, (n+1)/2); + m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero(); m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals; }// end divide -- cgit v1.2.3 From 0ca43f7e9a654e32da0066163a8656415961e266 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Thu, 18 Sep 2014 15:15:27 +0200 Subject: Remove deprecated code not used by evaluators --- Eigen/Cholesky | 1 - Eigen/CholmodSupport | 4 - Eigen/Core | 25 +- Eigen/IterativeLinearSolvers | 6 - Eigen/LU | 1 - Eigen/PaStiXSupport | 4 - Eigen/QR | 1 - Eigen/SPQRSupport | 2 - Eigen/SVD | 1 - Eigen/SparseCholesky | 5 - Eigen/SparseLU | 3 - Eigen/SparseQR | 3 - Eigen/SuperLUSupport | 4 - Eigen/UmfPackSupport | 3 - Eigen/src/Cholesky/LDLT.h | 29 - Eigen/src/Cholesky/LLT.h | 30 -- Eigen/src/CholmodSupport/CholmodSupport.h | 62 --- Eigen/src/Core/ArrayBase.h | 65 --- Eigen/src/Core/Assign.h | 587 --------------------- Eigen/src/Core/AssignEvaluator.h | 6 - Eigen/src/Core/BandMatrix.h | 4 - Eigen/src/Core/Block.h | 15 - Eigen/src/Core/BooleanRedux.h | 51 -- Eigen/src/Core/CwiseBinaryOp.h | 72 --- Eigen/src/Core/CwiseNullaryOp.h | 9 - Eigen/src/Core/CwiseUnaryOp.h | 47 -- Eigen/src/Core/CwiseUnaryView.h | 31 -- Eigen/src/Core/DenseBase.h | 56 -- Eigen/src/Core/DenseCoeffsBase.h | 164 ------ Eigen/src/Core/Diagonal.h | 6 - Eigen/src/Core/DiagonalMatrix.h | 42 -- Eigen/src/Core/DiagonalProduct.h | 117 ---- Eigen/src/Core/Dot.h | 11 - Eigen/src/Core/EigenBase.h | 12 - Eigen/src/Core/Fuzzy.h | 5 - Eigen/src/Core/GeneralProduct.h | 458 ---------------- Eigen/src/Core/Inverse.h | 4 - Eigen/src/Core/Map.h | 18 - Eigen/src/Core/MapBase.h | 14 - Eigen/src/Core/Matrix.h | 4 - Eigen/src/Core/MatrixBase.h | 33 +- Eigen/src/Core/NoAlias.h | 63 --- Eigen/src/Core/PermutationMatrix.h | 73 --- Eigen/src/Core/PlainObjectBase.h | 25 - Eigen/src/Core/Product.h | 7 - Eigen/src/Core/ProductBase.h | 252 --------- Eigen/src/Core/Redux.h | 14 - Eigen/src/Core/Ref.h | 4 - Eigen/src/Core/Replicate.h | 5 - Eigen/src/Core/ReturnByValue.h | 11 +- Eigen/src/Core/Reverse.h | 10 - Eigen/src/Core/Select.h | 7 - Eigen/src/Core/SelfAdjointView.h | 137 ----- Eigen/src/Core/SelfCwiseBinaryOp.h | 209 -------- Eigen/src/Core/Solve.h | 2 - Eigen/src/Core/Swap.h | 131 ----- Eigen/src/Core/Transpose.h | 71 --- Eigen/src/Core/TriangularMatrix.h | 415 --------------- Eigen/src/Core/VectorwiseOp.h | 16 - Eigen/src/Core/Visitor.h | 13 - Eigen/src/Core/products/CoeffBasedProduct.h | 460 ---------------- Eigen/src/Core/products/GeneralMatrixMatrix.h | 82 --- .../Core/products/GeneralMatrixMatrixTriangular.h | 13 - 
Eigen/src/Core/products/SelfadjointMatrixMatrix.h | 54 -- Eigen/src/Core/products/SelfadjointMatrixVector.h | 113 ---- Eigen/src/Core/products/TriangularMatrixMatrix.h | 51 -- Eigen/src/Core/products/TriangularMatrixVector.h | 51 -- Eigen/src/Core/util/ForwardDeclarations.h | 4 - Eigen/src/Core/util/Macros.h | 42 -- Eigen/src/Core/util/XprHelper.h | 110 +--- Eigen/src/Geometry/AlignedBox.h | 4 - Eigen/src/Geometry/Homogeneous.h | 41 -- Eigen/src/Geometry/OrthoMethods.h | 15 - Eigen/src/Geometry/Quaternion.h | 4 - Eigen/src/Geometry/Transform.h | 4 - Eigen/src/Householder/HouseholderSequence.h | 3 - .../IterativeLinearSolvers/BasicPreconditioners.h | 29 - Eigen/src/IterativeLinearSolvers/BiCGSTAB.h | 37 -- .../src/IterativeLinearSolvers/ConjugateGradient.h | 37 -- Eigen/src/IterativeLinearSolvers/IncompleteLUT.h | 30 -- .../IterativeLinearSolvers/IterativeSolverBase.h | 72 --- Eigen/src/LU/Determinant.h | 4 - Eigen/src/LU/FullPivLU.h | 35 -- Eigen/src/LU/InverseImpl.h | 58 -- Eigen/src/LU/PartialPivLU.h | 35 -- Eigen/src/PaStiXSupport/PaStiXSupport.h | 64 --- Eigen/src/PardisoSupport/PardisoSupport.h | 62 --- Eigen/src/QR/ColPivHouseholderQR.h | 37 -- Eigen/src/QR/FullPivHouseholderQR.h | 40 -- Eigen/src/QR/HouseholderQR.h | 28 - Eigen/src/SPQRSupport/SuiteSparseQRSupport.h | 34 -- Eigen/src/SVD/SVDBase.h | 28 - Eigen/src/SparseCholesky/SimplicialCholesky.h | 76 +-- .../SparseCore/ConservativeSparseSparseProduct.h | 10 - Eigen/src/SparseCore/MappedSparseMatrix.h | 2 - Eigen/src/SparseCore/SparseAssign.h | 113 ---- Eigen/src/SparseCore/SparseBlock.h | 95 ---- Eigen/src/SparseCore/SparseCwiseBinaryOp.h | 254 --------- Eigen/src/SparseCore/SparseCwiseUnaryOp.h | 132 ----- Eigen/src/SparseCore/SparseDenseProduct.h | 248 +-------- Eigen/src/SparseCore/SparseDiagonalProduct.h | 178 ------- Eigen/src/SparseCore/SparseDot.h | 18 +- Eigen/src/SparseCore/SparseMatrix.h | 93 ---- Eigen/src/SparseCore/SparseMatrixBase.h | 55 -- Eigen/src/SparseCore/SparsePermutation.h | 44 -- Eigen/src/SparseCore/SparseProduct.h | 178 ------- Eigen/src/SparseCore/SparseRedux.h | 5 - Eigen/src/SparseCore/SparseSelfAdjointView.h | 149 +----- Eigen/src/SparseCore/SparseSolverBase.h | 9 +- .../SparseCore/SparseSparseProductWithPruning.h | 15 - Eigen/src/SparseCore/SparseTranspose.h | 51 -- Eigen/src/SparseCore/SparseTriangularView.h | 8 - Eigen/src/SparseCore/SparseUtil.h | 24 - Eigen/src/SparseCore/SparseVector.h | 25 - Eigen/src/SparseCore/SparseView.h | 43 -- Eigen/src/SparseCore/TriangularSolver.h | 156 ------ Eigen/src/SparseLU/SparseLU.h | 63 --- Eigen/src/SparseQR/SparseQR.h | 51 -- Eigen/src/SuperLUSupport/SuperLUSupport.h | 59 --- Eigen/src/UmfPackSupport/UmfPackSupport.h | 59 --- Eigen/src/misc/Solve.h | 78 --- Eigen/src/misc/SparseSolve.h | 134 ----- test/CMakeLists.txt | 5 - test/evaluators.cpp | 12 - test/mixingtypes.cpp | 10 +- test/nesting_ops.cpp | 5 - test/sparse_vector.cpp | 3 - test/vectorization_logic.cpp | 22 - unsupported/Eigen/IterativeSolvers | 3 - unsupported/Eigen/SparseExtra | 3 - unsupported/Eigen/src/IterativeSolvers/DGMRES.h | 36 -- unsupported/Eigen/src/IterativeSolvers/GMRES.h | 36 -- .../src/IterativeSolvers/IncompleteCholesky.h | 31 -- .../Eigen/src/IterativeSolvers/IncompleteLU.h | 30 -- unsupported/Eigen/src/IterativeSolvers/MINRES.h | 37 -- .../Eigen/src/SparseExtra/DynamicSparseMatrix.h | 2 - unsupported/test/CMakeLists.txt | 26 +- 137 files changed, 41 insertions(+), 7806 deletions(-) delete mode 100644 Eigen/src/Core/products/CoeffBasedProduct.h delete mode 100644 
Eigen/src/misc/Solve.h delete mode 100644 Eigen/src/misc/SparseSolve.h (limited to 'unsupported/Eigen') diff --git a/Eigen/Cholesky b/Eigen/Cholesky index 7314d326c..dd0ca911c 100644 --- a/Eigen/Cholesky +++ b/Eigen/Cholesky @@ -21,7 +21,6 @@ * \endcode */ -#include "src/misc/Solve.h" #include "src/Cholesky/LLT.h" #include "src/Cholesky/LDLT.h" #ifdef EIGEN_USE_LAPACKE diff --git a/Eigen/CholmodSupport b/Eigen/CholmodSupport index 745b884e7..687cd9777 100644 --- a/Eigen/CholmodSupport +++ b/Eigen/CholmodSupport @@ -33,12 +33,8 @@ extern "C" { * */ -#include "src/misc/Solve.h" -#include "src/misc/SparseSolve.h" - #include "src/CholmodSupport/CholmodSupport.h" - #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_CHOLMODSUPPORT_MODULE_H diff --git a/Eigen/Core b/Eigen/Core index 47c4f6299..2510e4898 100644 --- a/Eigen/Core +++ b/Eigen/Core @@ -11,16 +11,6 @@ #ifndef EIGEN_CORE_H #define EIGEN_CORE_H -// EIGEN_TEST_EVALUATORS => EIGEN_ENABLE_EVALUATORS -#ifndef EIGEN_TEST_NO_EVALUATORS - #ifndef EIGEN_TEST_EVALUATORS - #define EIGEN_TEST_EVALUATORS - #endif - #ifndef EIGEN_ENABLE_EVALUATORS - #define EIGEN_ENABLE_EVALUATORS - #endif -#endif - // first thing Eigen does: stop the compiler from committing suicide #include "src/Core/util/DisableStupidWarnings.h" @@ -317,11 +307,9 @@ using std::ptrdiff_t; #include "src/Core/MatrixBase.h" #include "src/Core/EigenBase.h" -#ifdef EIGEN_ENABLE_EVALUATORS #include "src/Core/Product.h" #include "src/Core/CoreEvaluators.h" #include "src/Core/AssignEvaluator.h" -#endif #ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874 // at least confirmed with Doxygen 1.5.5 and 1.5.6 @@ -332,10 +320,10 @@ using std::ptrdiff_t; #include "src/Core/util/BlasUtil.h" #include "src/Core/DenseStorage.h" #include "src/Core/NestByValue.h" -#ifndef EIGEN_ENABLE_EVALUATORS -#include "src/Core/ForceAlignedAccess.h" -#include "src/Core/Flagged.h" -#endif + +// #include "src/Core/ForceAlignedAccess.h" +// #include "src/Core/Flagged.h" + #include "src/Core/ReturnByValue.h" #include "src/Core/NoAlias.h" #include "src/Core/PlainObjectBase.h" @@ -368,18 +356,13 @@ using std::ptrdiff_t; #include "src/Core/CommaInitializer.h" #include "src/Core/ProductBase.h" #include "src/Core/GeneralProduct.h" -#ifdef EIGEN_ENABLE_EVALUATORS #include "src/Core/Solve.h" #include "src/Core/Inverse.h" -#endif #include "src/Core/TriangularMatrix.h" #include "src/Core/SelfAdjointView.h" #include "src/Core/products/GeneralBlockPanelKernel.h" #include "src/Core/products/Parallelizer.h" -#include "src/Core/products/CoeffBasedProduct.h" -#ifdef EIGEN_ENABLE_EVALUATORS #include "src/Core/ProductEvaluators.h" -#endif #include "src/Core/products/GeneralMatrixVector.h" #include "src/Core/products/GeneralMatrixMatrix.h" #include "src/Core/SolveTriangular.h" diff --git a/Eigen/IterativeLinearSolvers b/Eigen/IterativeLinearSolvers index 4df2c5a14..c06668bd2 100644 --- a/Eigen/IterativeLinearSolvers +++ b/Eigen/IterativeLinearSolvers @@ -26,13 +26,7 @@ * \endcode */ -#ifndef EIGEN_TEST_EVALUATORS -#include "src/misc/Solve.h" -#include "src/misc/SparseSolve.h" -#else #include "src/IterativeLinearSolvers/SolveWithGuess.h" -#endif - #include "src/IterativeLinearSolvers/IterativeSolverBase.h" #include "src/IterativeLinearSolvers/BasicPreconditioners.h" #include "src/IterativeLinearSolvers/ConjugateGradient.h" diff --git a/Eigen/LU b/Eigen/LU index 38e9067c7..132ecc42c 100644 --- a/Eigen/LU +++ b/Eigen/LU @@ -16,7 +16,6 @@ * \endcode */ -#include "src/misc/Solve.h" #include 
"src/misc/Kernel.h" #include "src/misc/Image.h" #include "src/LU/FullPivLU.h" diff --git a/Eigen/PaStiXSupport b/Eigen/PaStiXSupport index 7c616ee5e..e7d275f97 100644 --- a/Eigen/PaStiXSupport +++ b/Eigen/PaStiXSupport @@ -35,12 +35,8 @@ extern "C" { * */ -#include "src/misc/Solve.h" -#include "src/misc/SparseSolve.h" - #include "src/PaStiXSupport/PaStiXSupport.h" - #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_PASTIXSUPPORT_MODULE_H diff --git a/Eigen/QR b/Eigen/QR index 4c2533610..230cb079a 100644 --- a/Eigen/QR +++ b/Eigen/QR @@ -24,7 +24,6 @@ * \endcode */ -#include "src/misc/Solve.h" #include "src/QR/HouseholderQR.h" #include "src/QR/FullPivHouseholderQR.h" #include "src/QR/ColPivHouseholderQR.h" diff --git a/Eigen/SPQRSupport b/Eigen/SPQRSupport index 77016442e..e3f49bb5a 100644 --- a/Eigen/SPQRSupport +++ b/Eigen/SPQRSupport @@ -21,8 +21,6 @@ * */ -#include "src/misc/Solve.h" -#include "src/misc/SparseSolve.h" #include "src/CholmodSupport/CholmodSupport.h" #include "src/SPQRSupport/SuiteSparseQRSupport.h" diff --git a/Eigen/SVD b/Eigen/SVD index c3d24286c..c13472e82 100644 --- a/Eigen/SVD +++ b/Eigen/SVD @@ -20,7 +20,6 @@ * \endcode */ -#include "src/misc/Solve.h" #include "src/SVD/SVDBase.h" #include "src/SVD/JacobiSVD.h" #if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT) diff --git a/Eigen/SparseCholesky b/Eigen/SparseCholesky index 2c4f66105..b6a320c40 100644 --- a/Eigen/SparseCholesky +++ b/Eigen/SparseCholesky @@ -34,11 +34,6 @@ #error The SparseCholesky module has nothing to offer in MPL2 only mode #endif -#ifndef EIGEN_TEST_EVALUATORS -#include "src/misc/Solve.h" -#include "src/misc/SparseSolve.h" -#endif - #include "src/SparseCholesky/SimplicialCholesky.h" #ifndef EIGEN_MPL2_ONLY diff --git a/Eigen/SparseLU b/Eigen/SparseLU index 8527a49bd..38b38b531 100644 --- a/Eigen/SparseLU +++ b/Eigen/SparseLU @@ -20,9 +20,6 @@ * Please, see the documentation of the SparseLU class for more details. 
*/ -#include "src/misc/Solve.h" -#include "src/misc/SparseSolve.h" - // Ordering interface #include "OrderingMethods" diff --git a/Eigen/SparseQR b/Eigen/SparseQR index 4ee42065e..efb2695ba 100644 --- a/Eigen/SparseQR +++ b/Eigen/SparseQR @@ -21,9 +21,6 @@ * */ -#include "src/misc/Solve.h" -#include "src/misc/SparseSolve.h" - #include "OrderingMethods" #include "src/SparseCore/SparseColEtree.h" #include "src/SparseQR/SparseQR.h" diff --git a/Eigen/SuperLUSupport b/Eigen/SuperLUSupport index 575e14fbc..d1eac9464 100644 --- a/Eigen/SuperLUSupport +++ b/Eigen/SuperLUSupport @@ -48,12 +48,8 @@ namespace Eigen { struct SluMatrix; } * */ -#include "src/misc/Solve.h" -#include "src/misc/SparseSolve.h" - #include "src/SuperLUSupport/SuperLUSupport.h" - #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SUPERLUSUPPORT_MODULE_H diff --git a/Eigen/UmfPackSupport b/Eigen/UmfPackSupport index 984f64a84..0efad5dee 100644 --- a/Eigen/UmfPackSupport +++ b/Eigen/UmfPackSupport @@ -26,9 +26,6 @@ extern "C" { * */ -#include "src/misc/Solve.h" -#include "src/misc/SparseSolve.h" - #include "src/UmfPackSupport/UmfPackSupport.h" #include "src/Core/util/ReenableStupidWarnings.h" diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h index f621323a3..32c770654 100644 --- a/Eigen/src/Cholesky/LDLT.h +++ b/Eigen/src/Cholesky/LDLT.h @@ -174,7 +174,6 @@ template class LDLT * * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt() */ -#ifdef EIGEN_TEST_EVALUATORS template inline const Solve solve(const MatrixBase& b) const @@ -184,17 +183,6 @@ template class LDLT && "LDLT::solve(): invalid number of rows of the right hand side matrix b"); return Solve(*this, b.derived()); } -#else - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "LDLT is not initialized."); - eigen_assert(m_matrix.rows()==b.rows() - && "LDLT::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } -#endif template bool solveInPlace(MatrixBase &bAndX) const; @@ -524,23 +512,6 @@ void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) cons dst = m_transpositions.transpose() * dst; } #endif - -namespace internal { -#ifndef EIGEN_TEST_EVALUATORS -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef LDLT<_MatrixType,_UpLo> LDLTType; - EIGEN_MAKE_SOLVE_HELPERS(LDLTType,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; -#endif -} /** \internal use x = ldlt_object.solve(x); * diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h index 89fb9a011..cb9e0eb7b 100644 --- a/Eigen/src/Cholesky/LLT.h +++ b/Eigen/src/Cholesky/LLT.h @@ -117,7 +117,6 @@ template class LLT * * \sa solveInPlace(), MatrixBase::llt(), SelfAdjointView::llt() */ -#ifdef EIGEN_TEST_EVALUATORS template inline const Solve solve(const MatrixBase& b) const @@ -127,17 +126,6 @@ template class LLT && "LLT::solve(): invalid number of rows of the right hand side matrix b"); return Solve(*this, b.derived()); } -#else - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "LLT is not initialized."); - eigen_assert(m_matrix.rows()==b.rows() - && "LLT::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } -#endif template void solveInPlace(MatrixBase &bAndX) const; @@ -433,24 +421,6 @@ void 
LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const } #endif -namespace internal { -#ifndef EIGEN_TEST_EVALUATORS -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef LLT<_MatrixType,UpLo> LLTType; - EIGEN_MAKE_SOLVE_HELPERS(LLTType,Rhs) - - template void evalTo(Dest& dst) const - { - dst = rhs(); - dec().solveInPlace(dst); - } -}; -#endif -} - /** \internal use x = llt_object.solve(x); * * This is the \em in-place version of solve(). diff --git a/Eigen/src/CholmodSupport/CholmodSupport.h b/Eigen/src/CholmodSupport/CholmodSupport.h index d3db51d0b..3524ffb2d 100644 --- a/Eigen/src/CholmodSupport/CholmodSupport.h +++ b/Eigen/src/CholmodSupport/CholmodSupport.h @@ -217,36 +217,6 @@ class CholmodBase : public SparseSolverBase return derived(); } -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "LLT is not initialized."); - eigen_assert(rows()==b.rows() - && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } - - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::sparse_solve_retval - solve(const SparseMatrixBase& b) const - { - eigen_assert(m_isInitialized && "LLT is not initialized."); - eigen_assert(rows()==b.rows() - && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b"); - return internal::sparse_solve_retval(*this, b.derived()); - } -#endif // EIGEN_TEST_EVALUATORS - /** Performs a symbolic decomposition on the sparsity pattern of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. 
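For readers tracking this API migration: after this series, solve() on the decompositions no longer
returns an internal::solve_retval proxy; it returns a Solve<> expression whose evaluation is
deferred to the decomposition's _solve_impl() when the expression is assigned. A minimal dense
sketch of the resulting user-facing behaviour (the matrix and sizes here are purely illustrative):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      Eigen::MatrixXd M = A * A.transpose()
                        + 4.0 * Eigen::MatrixXd::Identity(4, 4); // SPD by construction
      Eigen::VectorXd b = Eigen::VectorXd::Random(4);

      Eigen::LDLT<Eigen::MatrixXd> ldlt(M);
      // ldlt.solve(b) is a lightweight Solve<LDLT<...>, VectorXd> expression;
      // the actual substitutions run in _solve_impl() on assignment.
      Eigen::VectorXd x = ldlt.solve(b);

      std::cout << "residual: " << (M * x - b).norm() << std::endl;
      return 0;
    }
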
@@ -574,38 +544,6 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom } }; -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -template -struct sparse_solve_retval, Rhs> - : sparse_solve_retval_base, Rhs> -{ - typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec; - EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -} // end namespace internal -#endif - } // end namespace Eigen #endif // EIGEN_CHOLMODSUPPORT_H diff --git a/Eigen/src/Core/ArrayBase.h b/Eigen/src/Core/ArrayBase.h index f5bae6357..4e80634b9 100644 --- a/Eigen/src/Core/ArrayBase.h +++ b/Eigen/src/Core/ArrayBase.h @@ -64,9 +64,6 @@ template class ArrayBase using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; -#ifndef EIGEN_TEST_EVALUATORS - using Base::CoeffReadCost; -#endif using Base::derived; using Base::const_cast_derived; @@ -123,11 +120,7 @@ template class ArrayBase EIGEN_DEVICE_FUNC Derived& operator=(const ArrayBase& other) { -#ifndef EIGEN_TEST_EVALUATORS - return internal::assign_selector::run(derived(), other.derived()); -#else internal::call_assignment(derived(), other.derived()); -#endif } EIGEN_DEVICE_FUNC @@ -183,7 +176,6 @@ template class ArrayBase {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} }; -#ifdef EIGEN_TEST_EVALUATORS /** replaces \c *this by \c *this - \a other. * * \returns a reference to \c *this @@ -235,63 +227,6 @@ ArrayBase::operator/=(const ArrayBase& other) call_assignment(derived(), other.derived(), internal::div_assign_op()); return derived(); } -#else // EIGEN_TEST_EVALUATORS -/** replaces \c *this by \c *this - \a other. - * - * \returns a reference to \c *this - */ -template -template -EIGEN_STRONG_INLINE Derived & -ArrayBase::operator-=(const ArrayBase &other) -{ - SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); - tmp = other.derived(); - return derived(); -} - -/** replaces \c *this by \c *this + \a other. - * - * \returns a reference to \c *this - */ -template -template -EIGEN_STRONG_INLINE Derived & -ArrayBase::operator+=(const ArrayBase& other) -{ - SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); - tmp = other.derived(); - return derived(); -} - -/** replaces \c *this by \c *this * \a other coefficient wise. - * - * \returns a reference to \c *this - */ -template -template -EIGEN_STRONG_INLINE Derived & -ArrayBase::operator*=(const ArrayBase& other) -{ - SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); - tmp = other.derived(); - return derived(); -} - -/** replaces \c *this by \c *this / \a other coefficient wise. 
- * - * \returns a reference to \c *this - */ -template -template -EIGEN_STRONG_INLINE Derived & -ArrayBase::operator/=(const ArrayBase& other) -{ - SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); - tmp = other.derived(); - return derived(); -} -#endif } // end namespace Eigen diff --git a/Eigen/src/Core/Assign.h b/Eigen/src/Core/Assign.h index 5395e5436..53806ba33 100644 --- a/Eigen/src/Core/Assign.h +++ b/Eigen/src/Core/Assign.h @@ -13,487 +13,6 @@ #define EIGEN_ASSIGN_H namespace Eigen { -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -/*************************************************************************** -* Part 1 : the logic deciding a strategy for traversal and unrolling * -***************************************************************************/ - -template -struct assign_traits -{ -public: - enum { - DstIsAligned = Derived::Flags & AlignedBit, - DstHasDirectAccess = Derived::Flags & DirectAccessBit, - SrcIsAligned = OtherDerived::Flags & AlignedBit, - JointAlignment = bool(DstIsAligned) && bool(SrcIsAligned) ? Aligned : Unaligned - }; - -private: - enum { - InnerSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::SizeAtCompileTime) - : int(Derived::Flags)&RowMajorBit ? int(Derived::ColsAtCompileTime) - : int(Derived::RowsAtCompileTime), - InnerMaxSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::MaxSizeAtCompileTime) - : int(Derived::Flags)&RowMajorBit ? int(Derived::MaxColsAtCompileTime) - : int(Derived::MaxRowsAtCompileTime), - MaxSizeAtCompileTime = Derived::SizeAtCompileTime, - PacketSize = packet_traits::size - }; - - enum { - StorageOrdersAgree = (int(Derived::IsRowMajor) == int(OtherDerived::IsRowMajor)), - MightVectorize = StorageOrdersAgree - && (int(Derived::Flags) & int(OtherDerived::Flags) & ActualPacketAccessBit), - MayInnerVectorize = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0 - && int(DstIsAligned) && int(SrcIsAligned), - MayLinearize = StorageOrdersAgree && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit), - MayLinearVectorize = MightVectorize && MayLinearize && DstHasDirectAccess - && (DstIsAligned || MaxSizeAtCompileTime == Dynamic), - /* If the destination isn't aligned, we have to do runtime checks and we don't unroll, - so it's only good for large enough sizes. */ - MaySliceVectorize = MightVectorize && DstHasDirectAccess - && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=3*PacketSize) - /* slice vectorization can be slow, so we only want it if the slices are big, which is - indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block - in a fixed-size matrix */ - }; - -public: - enum { - Traversal = int(MayInnerVectorize) ? int(InnerVectorizedTraversal) - : int(MayLinearVectorize) ? int(LinearVectorizedTraversal) - : int(MaySliceVectorize) ? int(SliceVectorizedTraversal) - : int(MayLinearize) ? int(LinearTraversal) - : int(DefaultTraversal), - Vectorized = int(Traversal) == InnerVectorizedTraversal - || int(Traversal) == LinearVectorizedTraversal - || int(Traversal) == SliceVectorizedTraversal - }; - -private: - enum { - UnrollingLimit = EIGEN_UNROLLING_LIMIT * (Vectorized ? 
int(PacketSize) : 1), - MayUnrollCompletely = int(Derived::SizeAtCompileTime) != Dynamic - && int(OtherDerived::CoeffReadCost) != Dynamic - && int(Derived::SizeAtCompileTime) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit), - MayUnrollInner = int(InnerSize) != Dynamic - && int(OtherDerived::CoeffReadCost) != Dynamic - && int(InnerSize) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit) - }; - -public: - enum { - Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal)) - ? ( - int(MayUnrollCompletely) ? int(CompleteUnrolling) - : int(MayUnrollInner) ? int(InnerUnrolling) - : int(NoUnrolling) - ) - : int(Traversal) == int(LinearVectorizedTraversal) - ? ( bool(MayUnrollCompletely) && bool(DstIsAligned) ? int(CompleteUnrolling) : int(NoUnrolling) ) - : int(Traversal) == int(LinearTraversal) - ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) : int(NoUnrolling) ) - : int(NoUnrolling) - }; - -#ifdef EIGEN_DEBUG_ASSIGN - static void debug() - { - EIGEN_DEBUG_VAR(DstIsAligned) - EIGEN_DEBUG_VAR(SrcIsAligned) - EIGEN_DEBUG_VAR(JointAlignment) - EIGEN_DEBUG_VAR(Derived::SizeAtCompileTime) - EIGEN_DEBUG_VAR(OtherDerived::CoeffReadCost) - EIGEN_DEBUG_VAR(InnerSize) - EIGEN_DEBUG_VAR(InnerMaxSize) - EIGEN_DEBUG_VAR(PacketSize) - EIGEN_DEBUG_VAR(StorageOrdersAgree) - EIGEN_DEBUG_VAR(MightVectorize) - EIGEN_DEBUG_VAR(MayLinearize) - EIGEN_DEBUG_VAR(MayInnerVectorize) - EIGEN_DEBUG_VAR(MayLinearVectorize) - EIGEN_DEBUG_VAR(MaySliceVectorize) - EIGEN_DEBUG_VAR(Traversal) - EIGEN_DEBUG_VAR(UnrollingLimit) - EIGEN_DEBUG_VAR(MayUnrollCompletely) - EIGEN_DEBUG_VAR(MayUnrollInner) - EIGEN_DEBUG_VAR(Unrolling) - } -#endif -}; - -/*************************************************************************** -* Part 2 : meta-unrollers -***************************************************************************/ - -/************************ -*** Default traversal *** -************************/ - -template -struct assign_DefaultTraversal_CompleteUnrolling -{ - enum { - outer = Index / Derived1::InnerSizeAtCompileTime, - inner = Index % Derived1::InnerSizeAtCompileTime - }; - - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) - { - dst.copyCoeffByOuterInner(outer, inner, src); - assign_DefaultTraversal_CompleteUnrolling::run(dst, src); - } -}; - -template -struct assign_DefaultTraversal_CompleteUnrolling -{ - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {} -}; - -template -struct assign_DefaultTraversal_InnerUnrolling -{ - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, typename Derived1::Index outer) - { - dst.copyCoeffByOuterInner(outer, Index, src); - assign_DefaultTraversal_InnerUnrolling::run(dst, src, outer); - } -}; - -template -struct assign_DefaultTraversal_InnerUnrolling -{ - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, typename Derived1::Index) {} -}; - -/*********************** -*** Linear traversal *** -***********************/ - -template -struct assign_LinearTraversal_CompleteUnrolling -{ - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) - { - dst.copyCoeff(Index, src); - assign_LinearTraversal_CompleteUnrolling::run(dst, src); - } -}; - -template -struct assign_LinearTraversal_CompleteUnrolling -{ - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {} -}; - -/************************** -*** 
Inner vectorization *** -**************************/ - -template -struct assign_innervec_CompleteUnrolling -{ - enum { - outer = Index / Derived1::InnerSizeAtCompileTime, - inner = Index % Derived1::InnerSizeAtCompileTime, - JointAlignment = assign_traits::JointAlignment - }; - - static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) - { - dst.template copyPacketByOuterInner(outer, inner, src); - assign_innervec_CompleteUnrolling::size, Stop>::run(dst, src); - } -}; - -template -struct assign_innervec_CompleteUnrolling -{ - static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {} -}; - -template -struct assign_innervec_InnerUnrolling -{ - static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, typename Derived1::Index outer) - { - dst.template copyPacketByOuterInner(outer, Index, src); - assign_innervec_InnerUnrolling::size, Stop>::run(dst, src, outer); - } -}; - -template -struct assign_innervec_InnerUnrolling -{ - static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, typename Derived1::Index) {} -}; - -/*************************************************************************** -* Part 3 : implementation of all cases -***************************************************************************/ - -template::Traversal, - int Unrolling = assign_traits::Unrolling, - int Version = Specialized> -struct assign_impl; - -/************************ -*** Default traversal *** -************************/ - -template -struct assign_impl -{ - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &, const Derived2 &) { } -}; - -template -struct assign_impl -{ - typedef typename Derived1::Index Index; - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - const Index innerSize = dst.innerSize(); - const Index outerSize = dst.outerSize(); - for(Index outer = 0; outer < outerSize; ++outer) - for(Index inner = 0; inner < innerSize; ++inner) - dst.copyCoeffByOuterInner(outer, inner, src); - } -}; - -template -struct assign_impl -{ - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) - { - assign_DefaultTraversal_CompleteUnrolling - ::run(dst, src); - } -}; - -template -struct assign_impl -{ - typedef typename Derived1::Index Index; - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) - { - const Index outerSize = dst.outerSize(); - for(Index outer = 0; outer < outerSize; ++outer) - assign_DefaultTraversal_InnerUnrolling - ::run(dst, src, outer); - } -}; - -/*********************** -*** Linear traversal *** -***********************/ - -template -struct assign_impl -{ - typedef typename Derived1::Index Index; - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - const Index size = dst.size(); - for(Index i = 0; i < size; ++i) - dst.copyCoeff(i, src); - } -}; - -template -struct assign_impl -{ - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) - { - assign_LinearTraversal_CompleteUnrolling - ::run(dst, src); - } -}; - -/************************** -*** Inner vectorization *** -**************************/ - -template -struct assign_impl -{ - typedef typename Derived1::Index Index; - static inline void run(Derived1 &dst, const Derived2 &src) - { - const Index innerSize = dst.innerSize(); - const Index outerSize = dst.outerSize(); - const Index packetSize = packet_traits::size; - for(Index outer = 0; outer < outerSize; ++outer) - for(Index inner = 0; inner < innerSize; 
inner+=packetSize) - dst.template copyPacketByOuterInner(outer, inner, src); - } -}; - -template -struct assign_impl -{ - static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) - { - assign_innervec_CompleteUnrolling - ::run(dst, src); - } -}; - -template -struct assign_impl -{ - typedef typename Derived1::Index Index; - static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) - { - const Index outerSize = dst.outerSize(); - for(Index outer = 0; outer < outerSize; ++outer) - assign_innervec_InnerUnrolling - ::run(dst, src, outer); - } -}; - -/*************************** -*** Linear vectorization *** -***************************/ - -template -struct unaligned_assign_impl -{ - template - static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, typename Derived::Index, typename Derived::Index) {} -}; - -template <> -struct unaligned_assign_impl -{ - // MSVC must not inline this functions. If it does, it fails to optimize the - // packet access path. -#ifdef _MSC_VER - template - static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end) -#else - template - static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end) -#endif - { - for (typename Derived::Index index = start; index < end; ++index) - dst.copyCoeff(index, src); - } -}; - -template -struct assign_impl -{ - typedef typename Derived1::Index Index; - static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) - { - const Index size = dst.size(); - typedef packet_traits PacketTraits; - enum { - packetSize = PacketTraits::size, - dstAlignment = PacketTraits::AlignedOnScalar ? Aligned : int(assign_traits::DstIsAligned) , - srcAlignment = assign_traits::JointAlignment - }; - const Index alignedStart = assign_traits::DstIsAligned ? 0 - : internal::first_aligned(&dst.coeffRef(0), size); - const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize; - - unaligned_assign_impl::DstIsAligned!=0>::run(src,dst,0,alignedStart); - - for(Index index = alignedStart; index < alignedEnd; index += packetSize) - { - dst.template copyPacket(index, src); - } - - unaligned_assign_impl<>::run(src,dst,alignedEnd,size); - } -}; - -template -struct assign_impl -{ - typedef typename Derived1::Index Index; - static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) - { - enum { size = Derived1::SizeAtCompileTime, - packetSize = packet_traits::size, - alignedSize = (size/packetSize)*packetSize }; - - assign_innervec_CompleteUnrolling::run(dst, src); - assign_DefaultTraversal_CompleteUnrolling::run(dst, src); - } -}; - -/************************** -*** Slice vectorization *** -***************************/ - -template -struct assign_impl -{ - typedef typename Derived1::Index Index; - static inline void run(Derived1 &dst, const Derived2 &src) - { - typedef packet_traits PacketTraits; - enum { - packetSize = PacketTraits::size, - alignable = PacketTraits::AlignedOnScalar, - dstAlignment = alignable ? Aligned : int(assign_traits::DstIsAligned) , - srcAlignment = assign_traits::JointAlignment - }; - const Index packetAlignedMask = packetSize - 1; - const Index innerSize = dst.innerSize(); - const Index outerSize = dst.outerSize(); - const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0; - Index alignedStart = ((!alignable) || assign_traits::DstIsAligned) ? 
0 - : internal::first_aligned(&dst.coeffRef(0,0), innerSize); - - for(Index outer = 0; outer < outerSize; ++outer) - { - const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask); - // do the non-vectorizable part of the assignment - for(Index inner = 0; inner(outer, inner, src); - - // do the non-vectorizable part of the assignment - for(Index inner = alignedEnd; inner((alignedStart+alignedStep)%packetSize, innerSize); - } - } -}; - -} // end namespace internal - -#endif // EIGEN_TEST_EVALUATORS - -/*************************************************************************** -* Part 4 : implementation of DenseBase methods -***************************************************************************/ template template @@ -508,71 +27,12 @@ EIGEN_STRONG_INLINE Derived& DenseBase EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT(SameType,YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) -#ifdef EIGEN_TEST_EVALUATORS - eigen_assert(rows() == other.rows() && cols() == other.cols()); internal::call_assignment_no_alias(derived(),other.derived()); -#else // EIGEN_TEST_EVALUATORS - -#ifdef EIGEN_DEBUG_ASSIGN - internal::assign_traits::debug(); -#endif - eigen_assert(rows() == other.rows() && cols() == other.cols()); - internal::assign_impl::Traversal) - : int(InvalidTraversal)>::run(derived(),other.derived()); - -#ifndef EIGEN_NO_DEBUG - checkTransposeAliasing(other.derived()); -#endif - -#endif // EIGEN_TEST_EVALUATORS - return derived(); } -namespace internal { - -#ifndef EIGEN_TEST_EVALUATORS -template::Flags) & EvalBeforeAssigningBit) != 0, - bool NeedToTranspose = ((int(Derived::RowsAtCompileTime) == 1 && int(OtherDerived::ColsAtCompileTime) == 1) - | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&". - // revert to || as soon as not needed anymore. 
- (int(Derived::ColsAtCompileTime) == 1 && int(OtherDerived::RowsAtCompileTime) == 1)) - && int(Derived::SizeAtCompileTime) != 1> -struct assign_selector; - -template -struct assign_selector { - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); } - template - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE Derived& evalTo(ActualDerived& dst, const ActualOtherDerived& other) { other.evalTo(dst); return dst; } -}; -template -struct assign_selector { - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.eval()); } -}; -template -struct assign_selector { - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); } - template - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE Derived& evalTo(ActualDerived& dst, const ActualOtherDerived& other) { Transpose dstTrans(dst); other.evalTo(dstTrans); return dst; } -}; -template -struct assign_selector { - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose().eval()); } -}; -#endif // EIGEN_TEST_EVALUATORS -} // end namespace internal - -#ifdef EIGEN_TEST_EVALUATORS template template EIGEN_DEVICE_FUNC @@ -624,53 +84,6 @@ EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const ReturnByValue< other.derived().evalTo(derived()); return derived(); } -#else // EIGEN_TEST_EVALUATORS -template -template -EIGEN_DEVICE_FUNC -EIGEN_STRONG_INLINE Derived& DenseBase::operator=(const DenseBase& other) -{ - return internal::assign_selector::run(derived(), other.derived()); -} - -template -EIGEN_DEVICE_FUNC -EIGEN_STRONG_INLINE Derived& DenseBase::operator=(const DenseBase& other) -{ - return internal::assign_selector::run(derived(), other.derived()); -} - -template -EIGEN_DEVICE_FUNC -EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const MatrixBase& other) -{ - return internal::assign_selector::run(derived(), other.derived()); -} - -template -template -EIGEN_DEVICE_FUNC -EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const DenseBase& other) -{ - return internal::assign_selector::run(derived(), other.derived()); -} - -template -template -EIGEN_DEVICE_FUNC -EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const EigenBase& other) -{ - return internal::assign_selector::evalTo(derived(), other.derived()); -} - -template -template -EIGEN_DEVICE_FUNC -EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const ReturnByValue& other) -{ - return internal::assign_selector::evalTo(derived(), other.derived()); -} -#endif // EIGEN_TEST_EVALUATORS } // end namespace Eigen diff --git a/Eigen/src/Core/AssignEvaluator.h b/Eigen/src/Core/AssignEvaluator.h index 4e35e432e..8ab71446c 100644 --- a/Eigen/src/Core/AssignEvaluator.h +++ b/Eigen/src/Core/AssignEvaluator.h @@ -695,13 +695,7 @@ void call_assignment(const Dst& dst, const Src& src) template void call_assignment(Dst& dst, const Src& src, const Func& func, typename enable_if::AssumeAliasing==1, void*>::type = 0) { -#ifdef EIGEN_TEST_EVALUATORS typename plain_matrix_type::type tmp(src); -#else - typename Src::PlainObject tmp(src.rows(), src.cols()); - call_assignment_no_alias(tmp, src, internal::assign_op()); -#endif - call_assignment_no_alias(dst, tmp, func); } diff --git a/Eigen/src/Core/BandMatrix.h b/Eigen/src/Core/BandMatrix.h index ba4749707..b0ebe1160 100644 --- 
a/Eigen/src/Core/BandMatrix.h +++ b/Eigen/src/Core/BandMatrix.h @@ -328,8 +328,6 @@ class TridiagonalMatrix : public BandMatrix @@ -347,8 +345,6 @@ struct evaluator_traits struct AssignmentKind { typedef EigenBase2EigenBase Kind; }; -#endif - } // end namespace internal diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h index 2d62f7a46..737e5dc24 100644 --- a/Eigen/src/Core/Block.h +++ b/Eigen/src/Core/Block.h @@ -84,26 +84,11 @@ struct traits > : traits::size) == 0) - && (InnerStrideAtCompileTime == 1) - ? PacketAccessBit : 0, - MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % EIGEN_ALIGN_BYTES) == 0)) ? AlignedBit : 0, - FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (traits::Flags&LinearAccessBit))) ? LinearAccessBit : 0, - FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, - FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0, - Flags0 = traits::Flags & ( (HereditaryBits & ~RowMajorBit) | - DirectAccessBit | - MaskPacketAccessBit | - MaskAlignedBit), - Flags = Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit -#else // FIXME, this traits is rather specialized for dense object and it needs to be cleaned further FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0, Flags = (traits::Flags & DirectAccessBit) | FlagsLvalueBit | FlagsRowMajorBit // FIXME DirectAccessBit should not be handled by expressions -#endif }; }; diff --git a/Eigen/src/Core/BooleanRedux.h b/Eigen/src/Core/BooleanRedux.h index 192e1db53..dac1887e0 100644 --- a/Eigen/src/Core/BooleanRedux.h +++ b/Eigen/src/Core/BooleanRedux.h @@ -17,18 +17,11 @@ namespace internal { template struct all_unroller { -#ifdef EIGEN_TEST_EVALUATORS typedef typename Derived::ExpressionTraits Traits; enum { col = (UnrollCount-1) / Traits::RowsAtCompileTime, row = (UnrollCount-1) % Traits::RowsAtCompileTime }; -#else - enum { - col = (UnrollCount-1) / Derived::RowsAtCompileTime, - row = (UnrollCount-1) % Derived::RowsAtCompileTime - }; -#endif static inline bool run(const Derived &mat) { @@ -51,18 +44,11 @@ struct all_unroller template struct any_unroller { -#ifdef EIGEN_TEST_EVALUATORS typedef typename Derived::ExpressionTraits Traits; enum { col = (UnrollCount-1) / Traits::RowsAtCompileTime, row = (UnrollCount-1) % Traits::RowsAtCompileTime }; -#else - enum { - col = (UnrollCount-1) / Derived::RowsAtCompileTime, - row = (UnrollCount-1) % Derived::RowsAtCompileTime - }; -#endif static inline bool run(const Derived &mat) { @@ -94,7 +80,6 @@ struct any_unroller template inline bool DenseBase::all() const { -#ifdef EIGEN_TEST_EVALUATORS typedef typename internal::evaluator::type Evaluator; enum { unroll = SizeAtCompileTime != Dynamic @@ -112,24 +97,6 @@ inline bool DenseBase::all() const if (!evaluator.coeff(i, j)) return false; return true; } -#else - enum { - unroll = SizeAtCompileTime != Dynamic - && CoeffReadCost != Dynamic - && NumTraits::AddCost != Dynamic - && SizeAtCompileTime * (CoeffReadCost + NumTraits::AddCost) <= EIGEN_UNROLLING_LIMIT - }; - - if(unroll) - return internal::all_unroller::run(derived()); - else - { - for(Index j = 0; j < cols(); ++j) - for(Index i = 0; i < rows(); ++i) - if (!coeff(i, j)) return false; - return true; - } -#endif } /** \returns true if at least one coefficient is true @@ -139,7 +106,6 @@ inline bool DenseBase::all() const template inline bool DenseBase::any() const { -#ifdef EIGEN_TEST_EVALUATORS typedef typename 
internal::evaluator::type Evaluator; enum { unroll = SizeAtCompileTime != Dynamic @@ -157,23 +123,6 @@ inline bool DenseBase::any() const if (evaluator.coeff(i, j)) return true; return false; } -#else - enum { - unroll = SizeAtCompileTime != Dynamic - && CoeffReadCost != Dynamic - && NumTraits::AddCost != Dynamic - && SizeAtCompileTime * (CoeffReadCost + NumTraits::AddCost) <= EIGEN_UNROLLING_LIMIT - }; - if(unroll) - return internal::any_unroller::run(derived()); - else - { - for(Index j = 0; j < cols(); ++j) - for(Index i = 0; i < rows(); ++i) - if (coeff(i, j)) return true; - return false; - } -#endif } /** \returns the number of coefficients which evaluate to true diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h index b4662907e..de9109e53 100644 --- a/Eigen/src/Core/CwiseBinaryOp.h +++ b/Eigen/src/Core/CwiseBinaryOp.h @@ -66,28 +66,7 @@ struct traits > typedef typename remove_reference::type _LhsNested; typedef typename remove_reference::type _RhsNested; enum { -#ifndef EIGEN_TEST_EVALUATORS - LhsFlags = _LhsNested::Flags, - RhsFlags = _RhsNested::Flags, - SameType = is_same::value, - StorageOrdersAgree = (int(Lhs::Flags)&RowMajorBit)==(int(Rhs::Flags)&RowMajorBit), - Flags0 = (int(LhsFlags) | int(RhsFlags)) & ( - HereditaryBits - | (int(LhsFlags) & int(RhsFlags) & - ( AlignedBit - | (StorageOrdersAgree ? LinearAccessBit : 0) - | (functor_traits::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0) - ) - ) - ), - Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit), - - LhsCoeffReadCost = _LhsNested::CoeffReadCost, - RhsCoeffReadCost = _RhsNested::CoeffReadCost, - CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits::Cost -#else Flags = _LhsNested::Flags & RowMajorBit -#endif }; }; } // end namespace internal @@ -163,46 +142,6 @@ class CwiseBinaryOp : internal::no_assignment_operator, const BinaryOp m_functor; }; -#ifndef EIGEN_TEST_EVALUATORS -template -class CwiseBinaryOpImpl - : public internal::dense_xpr_base >::type -{ - typedef CwiseBinaryOp Derived; - public: - - typedef typename internal::dense_xpr_base >::type Base; - EIGEN_DENSE_PUBLIC_INTERFACE( Derived ) - - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const - { - return derived().functor()(derived().lhs().coeff(rowId, colId), - derived().rhs().coeff(rowId, colId)); - } - - template - EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const - { - return derived().functor().packetOp(derived().lhs().template packet(rowId, colId), - derived().rhs().template packet(rowId, colId)); - } - - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE const Scalar coeff(Index index) const - { - return derived().functor()(derived().lhs().coeff(index), - derived().rhs().coeff(index)); - } - - template - EIGEN_STRONG_INLINE PacketScalar packet(Index index) const - { - return derived().functor().packetOp(derived().lhs().template packet(index), - derived().rhs().template packet(index)); - } -}; -#else // Generic API dispatcher template class CwiseBinaryOpImpl @@ -211,7 +150,6 @@ class CwiseBinaryOpImpl public: typedef typename internal::generic_xpr_base >::type Base; }; -#endif /** replaces \c *this by \c *this - \a other. 
* @@ -222,12 +160,7 @@ template EIGEN_STRONG_INLINE Derived & MatrixBase::operator-=(const MatrixBase &other) { -#ifdef EIGEN_TEST_EVALUATORS call_assignment(derived(), other.derived(), internal::sub_assign_op()); -#else - SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); - tmp = other.derived(); -#endif return derived(); } @@ -240,12 +173,7 @@ template EIGEN_STRONG_INLINE Derived & MatrixBase::operator+=(const MatrixBase& other) { -#ifdef EIGEN_TEST_EVALUATORS call_assignment(derived(), other.derived(), internal::add_assign_op()); -#else - SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); - tmp = other.derived(); -#endif return derived(); } diff --git a/Eigen/src/Core/CwiseNullaryOp.h b/Eigen/src/Core/CwiseNullaryOp.h index f9f127cc2..8b8397da6 100644 --- a/Eigen/src/Core/CwiseNullaryOp.h +++ b/Eigen/src/Core/CwiseNullaryOp.h @@ -35,16 +35,7 @@ template struct traits > : traits { enum { -#ifndef EIGEN_TEST_EVALUATORS - Flags = (traits::Flags - & ( HereditaryBits - | (functor_has_linear_access::ret ? LinearAccessBit : 0) - | (functor_traits::PacketAccess ? PacketAccessBit : 0))) - | (functor_traits::IsRepeatable ? 0 : EvalBeforeNestingBit), - CoeffReadCost = functor_traits::Cost -#else Flags = traits::Flags & RowMajorBit -#endif }; }; } diff --git a/Eigen/src/Core/CwiseUnaryOp.h b/Eigen/src/Core/CwiseUnaryOp.h index c2bc47c93..79a872934 100644 --- a/Eigen/src/Core/CwiseUnaryOp.h +++ b/Eigen/src/Core/CwiseUnaryOp.h @@ -44,14 +44,7 @@ struct traits > typedef typename XprType::Nested XprTypeNested; typedef typename remove_reference::type _XprTypeNested; enum { -#ifndef EIGEN_TEST_EVALUATORS - Flags = _XprTypeNested::Flags & ( - HereditaryBits | LinearAccessBit | AlignedBit - | (functor_traits::PacketAccess ? PacketAccessBit : 0)), - CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits::Cost -#else Flags = _XprTypeNested::Flags & RowMajorBit -#endif }; }; } @@ -97,45 +90,6 @@ class CwiseUnaryOp : internal::no_assignment_operator, const UnaryOp m_functor; }; -#ifndef EIGEN_TEST_EVALUATORS -// This is the generic implementation for dense storage. -// It can be used for any expression types implementing the dense concept. 
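(Illustrative sketch, not part of the patch: the hunks above keep the user-facing behaviour unchanged. Compound assignment now dispatches through internal::call_assignment with add_assign_op/sub_assign_op, and the boolean reductions all()/any() read coefficients through an evaluator of the expression. A minimal example using only the public dense API:)

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d A = Eigen::Matrix3d::Random();
      Eigen::Matrix3d B = Eigen::Matrix3d::Identity();

      A += B;        // routed through internal::call_assignment with add_assign_op
      A -= 2.0 * B;  // same path with sub_assign_op

      // all()/any() now reduce through an evaluator of the comparison expression
      std::cout << (A.array().abs() >= 0.0).all() << "\n";
      std::cout << (A.array() > 1e6).any() << "\n";
      return 0;
    }
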
-template -class CwiseUnaryOpImpl - : public internal::dense_xpr_base >::type -{ - public: - - typedef CwiseUnaryOp Derived; - typedef typename internal::dense_xpr_base >::type Base; - EIGEN_DENSE_PUBLIC_INTERFACE(Derived) - - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const - { - return derived().functor()(derived().nestedExpression().coeff(rowId, colId)); - } - - template - EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const - { - return derived().functor().packetOp(derived().nestedExpression().template packet(rowId, colId)); - } - - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE const Scalar coeff(Index index) const - { - return derived().functor()(derived().nestedExpression().coeff(index)); - } - - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE PacketScalar packet(Index index) const - { - return derived().functor().packetOp(derived().nestedExpression().template packet(index)); - } -}; -#else // EIGEN_TEST_EVALUATORS // Generic API dispatcher template class CwiseUnaryOpImpl @@ -144,7 +98,6 @@ class CwiseUnaryOpImpl public: typedef typename internal::generic_xpr_base >::type Base; }; -#endif // EIGEN_TEST_EVALUATORS } // end namespace Eigen diff --git a/Eigen/src/Core/CwiseUnaryView.h b/Eigen/src/Core/CwiseUnaryView.h index 99cc03ac1..71249a39c 100644 --- a/Eigen/src/Core/CwiseUnaryView.h +++ b/Eigen/src/Core/CwiseUnaryView.h @@ -37,12 +37,7 @@ struct traits > typedef typename MatrixType::Nested MatrixTypeNested; typedef typename remove_all::type _MatrixTypeNested; enum { -#ifndef EIGEN_TEST_EVALUATORS - Flags = (traits<_MatrixTypeNested>::Flags & (HereditaryBits | LvalueBit | LinearAccessBit | DirectAccessBit)), - CoeffReadCost = traits<_MatrixTypeNested>::CoeffReadCost + functor_traits::Cost, -#else Flags = traits<_MatrixTypeNested>::Flags & (RowMajorBit | LvalueBit | DirectAccessBit), // FIXME DirectAccessBit should not be handled by expressions -#endif MatrixTypeInnerStride = inner_stride_at_compile_time::ret, // need to cast the sizeof's from size_t to int explicitly, otherwise: // "error: no integral type can represent all of the enumerator values @@ -93,7 +88,6 @@ class CwiseUnaryView : public CwiseUnaryViewImpl class CwiseUnaryViewImpl @@ -102,7 +96,6 @@ class CwiseUnaryViewImpl public: typedef typename internal::generic_xpr_base >::type Base; }; -#endif template class CwiseUnaryViewImpl @@ -128,30 +121,6 @@ class CwiseUnaryViewImpl { return derived().nestedExpression().outerStride() * sizeof(typename internal::traits::Scalar) / sizeof(Scalar); } - -#ifndef EIGEN_TEST_EVALUATORS - - EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const - { - return derived().functor()(derived().nestedExpression().coeff(row, col)); - } - - EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const - { - return derived().functor()(derived().nestedExpression().coeff(index)); - } - - EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) - { - return derived().functor()(const_cast_derived().nestedExpression().coeffRef(row, col)); - } - - EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) - { - return derived().functor()(const_cast_derived().nestedExpression().coeffRef(index)); - } - -#endif }; } // end namespace Eigen diff --git a/Eigen/src/Core/DenseBase.h b/Eigen/src/Core/DenseBase.h index f35c2edc4..6078af553 100644 --- a/Eigen/src/Core/DenseBase.h +++ b/Eigen/src/Core/DenseBase.h @@ -74,18 +74,6 @@ template class DenseBase using Base::colIndexByOuterInner; using Base::coeff; using Base::coeffByOuterInner; -#ifndef 
EIGEN_TEST_EVALUATORS - using Base::packet; - using Base::packetByOuterInner; - using Base::writePacket; - using Base::writePacketByOuterInner; - using Base::coeffRef; - using Base::coeffRefByOuterInner; - using Base::copyCoeff; - using Base::copyCoeffByOuterInner; - using Base::copyPacket; - using Base::copyPacketByOuterInner; -#endif using Base::operator(); using Base::operator[]; using Base::x; @@ -171,13 +159,6 @@ template class DenseBase InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), -#ifndef EIGEN_TEST_EVALUATORS - CoeffReadCost = internal::traits::CoeffReadCost, - /**< This is a rough measure of how expensive it is to read one coefficient from - * this expression. - */ -#endif - InnerStrideAtCompileTime = internal::inner_stride_at_compile_time::ret, OuterStrideAtCompileTime = internal::outer_stride_at_compile_time::ret }; @@ -292,15 +273,10 @@ template class DenseBase EIGEN_DEVICE_FUNC CommaInitializer operator<< (const Scalar& s); -#ifndef EIGEN_TEST_EVALUATORS - template - const Flagged flagged() const; -#else // TODO flagged is temporarly disabled. It seems useless now template const Derived& flagged() const { return derived(); } -#endif template EIGEN_DEVICE_FUNC @@ -313,15 +289,6 @@ template class DenseBase ConstTransposeReturnType transpose() const; EIGEN_DEVICE_FUNC void transposeInPlace(); -#ifndef EIGEN_NO_DEBUG -#ifndef EIGEN_TEST_EVALUATORS - protected: - template - void checkTransposeAliasing(const OtherDerived& other) const; - public: -#endif -#endif - EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index rows, Index cols, const Scalar& value); @@ -402,7 +369,6 @@ template class DenseBase return typename internal::eval::type(derived()); } -#ifdef EIGEN_TEST_EVALUATORS /** swaps *this with the expression \a other. * */ @@ -425,28 +391,6 @@ template class DenseBase eigen_assert(rows()==other.rows() && cols()==other.cols()); call_assignment(derived(), other.derived(), internal::swap_assign_op()); } -#else // EIGEN_TEST_EVALUATORS - /** swaps *this with the expression \a other. - * - */ - template - EIGEN_DEVICE_FUNC - void swap(const DenseBase& other, - int = OtherDerived::ThisConstantIsPrivateInPlainObjectBase) - { - SwapWrapper(derived()).lazyAssign(other.derived()); - } - - /** swaps *this with the matrix or array \a other. 
- * - */ - template - EIGEN_DEVICE_FUNC - void swap(PlainObjectBase& other) - { - SwapWrapper(derived()).lazyAssign(other.derived()); - } -#endif // EIGEN_TEST_EVALUATORS EIGEN_DEVICE_FUNC inline const NestByValue nestByValue() const; EIGEN_DEVICE_FUNC inline const ForceAlignedAccess forceAlignedAccess() const; diff --git a/Eigen/src/Core/DenseCoeffsBase.h b/Eigen/src/Core/DenseCoeffsBase.h index de31b8df2..a9e4dbaf9 100644 --- a/Eigen/src/Core/DenseCoeffsBase.h +++ b/Eigen/src/Core/DenseCoeffsBase.h @@ -98,11 +98,7 @@ class DenseCoeffsBase : public EigenBase { eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); -#ifndef EIGEN_TEST_EVALUATORS - return derived().coeff(row, col); -#else return typename internal::evaluator::type(derived()).coeff(row,col); -#endif } EIGEN_DEVICE_FUNC @@ -144,11 +140,7 @@ class DenseCoeffsBase : public EigenBase coeff(Index index) const { eigen_internal_assert(index >= 0 && index < size()); -#ifndef EIGEN_TEST_EVALUATORS - return derived().coeff(index); -#else return typename internal::evaluator::type(derived()).coeff(index); -#endif } @@ -226,11 +218,7 @@ class DenseCoeffsBase : public EigenBase EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const { eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); -#ifndef EIGEN_TEST_EVALUATORS - return derived().template packet(row,col); -#else return typename internal::evaluator::type(derived()).template packet(row,col); -#endif } @@ -256,11 +244,7 @@ class DenseCoeffsBase : public EigenBase EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { eigen_internal_assert(index >= 0 && index < size()); -#ifndef EIGEN_TEST_EVALUATORS - return derived().template packet(index); -#else return typename internal::evaluator::type(derived()).template packet(index); -#endif } protected: @@ -341,11 +325,7 @@ class DenseCoeffsBase : public DenseCoeffsBase= 0 && row < rows() && col >= 0 && col < cols()); -#ifndef EIGEN_TEST_EVALUATORS - return derived().coeffRef(row, col); -#else return typename internal::evaluator::type(derived()).coeffRef(row,col); -#endif } EIGEN_DEVICE_FUNC @@ -391,11 +371,7 @@ class DenseCoeffsBase : public DenseCoeffsBase= 0 && index < size()); -#ifndef EIGEN_TEST_EVALUATORS - return derived().coeffRef(index); -#else return typename internal::evaluator::type(derived()).coeffRef(index); -#endif } /** \returns a reference to the coefficient at given index. @@ -455,146 +431,6 @@ class DenseCoeffsBase : public DenseCoeffsBase - EIGEN_STRONG_INLINE void writePacket - (Index row, Index col, const typename internal::packet_traits::type& val) - { - eigen_internal_assert(row >= 0 && row < rows() - && col >= 0 && col < cols()); - derived().template writePacket(row,col,val); - } - - - /** \internal */ - template - EIGEN_STRONG_INLINE void writePacketByOuterInner - (Index outer, Index inner, const typename internal::packet_traits::type& val) - { - writePacket(rowIndexByOuterInner(outer, inner), - colIndexByOuterInner(outer, inner), - val); - } - - /** \internal - * Stores the given packet of coefficients, at the given index in this expression. It is your responsibility - * to ensure that a packet really starts there. This method is only available on expressions having the - * PacketAccessBit and the LinearAccessBit. - * - * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select - * the appropriate vectorization instruction. 
Aligned access is faster, but is only possible for packets - * starting at an address which is a multiple of the packet size. - */ - template - EIGEN_STRONG_INLINE void writePacket - (Index index, const typename internal::packet_traits::type& val) - { - eigen_internal_assert(index >= 0 && index < size()); - derived().template writePacket(index,val); - } - -#ifndef EIGEN_PARSED_BY_DOXYGEN - - /** \internal Copies the coefficient at position (row,col) of other into *this. - * - * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code - * with usual assignments. - * - * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. - */ - - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, const DenseBase& other) - { - eigen_internal_assert(row >= 0 && row < rows() - && col >= 0 && col < cols()); - derived().coeffRef(row, col) = other.derived().coeff(row, col); - } - - /** \internal Copies the coefficient at the given index of other into *this. - * - * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code - * with usual assignments. - * - * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. - */ - - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE void copyCoeff(Index index, const DenseBase& other) - { - eigen_internal_assert(index >= 0 && index < size()); - derived().coeffRef(index) = other.derived().coeff(index); - } - - - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE void copyCoeffByOuterInner(Index outer, Index inner, const DenseBase& other) - { - const Index row = rowIndexByOuterInner(outer,inner); - const Index col = colIndexByOuterInner(outer,inner); - // derived() is important here: copyCoeff() may be reimplemented in Derived! - derived().copyCoeff(row, col, other); - } - - /** \internal Copies the packet at position (row,col) of other into *this. - * - * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code - * with usual assignments. - * - * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. - */ - - template - EIGEN_STRONG_INLINE void copyPacket(Index row, Index col, const DenseBase& other) - { - eigen_internal_assert(row >= 0 && row < rows() - && col >= 0 && col < cols()); - derived().template writePacket(row, col, - other.derived().template packet(row, col)); - } - - /** \internal Copies the packet at the given index of other into *this. - * - * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code - * with usual assignments. - * - * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. - */ - - template - EIGEN_STRONG_INLINE void copyPacket(Index index, const DenseBase& other) - { - eigen_internal_assert(index >= 0 && index < size()); - derived().template writePacket(index, - other.derived().template packet(index)); - } - - /** \internal */ - template - EIGEN_STRONG_INLINE void copyPacketByOuterInner(Index outer, Index inner, const DenseBase& other) - { - const Index row = rowIndexByOuterInner(outer,inner); - const Index col = colIndexByOuterInner(outer,inner); - // derived() is important here: copyCoeff() may be reimplemented in Derived! 
- derived().template copyPacket< OtherDerived, StoreMode, LoadMode>(row, col, other); - } -#endif -#endif // EIGEN_TEST_EVALUATORS - }; /** \brief Base class providing direct read-only coefficient access to matrices and arrays. diff --git a/Eigen/src/Core/Diagonal.h b/Eigen/src/Core/Diagonal.h index 3ff6a3e66..1ffcd97f9 100644 --- a/Eigen/src/Core/Diagonal.h +++ b/Eigen/src/Core/Diagonal.h @@ -51,14 +51,8 @@ struct traits > : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), MaxColsAtCompileTime = 1, -#ifndef EIGEN_TEST_EVALUATORS - MaskLvalueBit = is_lvalue::value ? LvalueBit : 0, - Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit, - CoeffReadCost = _MatrixTypeNested::CoeffReadCost, -#else MaskLvalueBit = is_lvalue::value ? LvalueBit : 0, Flags = (unsigned int)_MatrixTypeNested::Flags & (RowMajorBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit, // FIXME DirectAccessBit should not be handled by expressions -#endif MatrixTypeOuterStride = outer_stride_at_compile_time::ret, InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride+1, OuterStrideAtCompileTime = 0 diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h index 801131b54..44c249aa6 100644 --- a/Eigen/src/Core/DiagonalMatrix.h +++ b/Eigen/src/Core/DiagonalMatrix.h @@ -45,20 +45,6 @@ class DiagonalBase : public EigenBase EIGEN_DEVICE_FUNC DenseMatrixType toDenseMatrix() const { return derived(); } -#ifndef EIGEN_TEST_EVALUATORS - template - EIGEN_DEVICE_FUNC - void evalTo(MatrixBase &other) const; - template - EIGEN_DEVICE_FUNC - void addTo(MatrixBase &other) const - { other.diagonal() += diagonal(); } - template - EIGEN_DEVICE_FUNC - void subTo(MatrixBase &other) const - { other.diagonal() -= diagonal(); } -#endif // EIGEN_TEST_EVALUATORS - EIGEN_DEVICE_FUNC inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); } EIGEN_DEVICE_FUNC @@ -69,17 +55,6 @@ class DiagonalBase : public EigenBase EIGEN_DEVICE_FUNC inline Index cols() const { return diagonal().size(); } -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the diagonal matrix product of \c *this by the matrix \a matrix. 
- */ - template - EIGEN_DEVICE_FUNC - const DiagonalProduct - operator*(const MatrixBase &matrix) const - { - return DiagonalProduct(matrix.derived(), derived()); - } -#else template EIGEN_DEVICE_FUNC const Product @@ -87,7 +62,6 @@ class DiagonalBase : public EigenBase { return Product(derived(),matrix.derived()); } -#endif // EIGEN_TEST_EVALUATORS EIGEN_DEVICE_FUNC inline const DiagonalWrapper, const DiagonalVectorType> > @@ -110,16 +84,6 @@ class DiagonalBase : public EigenBase } }; -#ifndef EIGEN_TEST_EVALUATORS -template -template -void DiagonalBase::evalTo(MatrixBase &other) const -{ - other.setZero(); - other.diagonal() = diagonal(); -} -#endif // EIGEN_TEST_EVALUATORS - #endif /** \class DiagonalMatrix @@ -273,10 +237,6 @@ struct traits > MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, Flags = (traits::Flags & LvalueBit) | NoPreferredStorageOrderBit -#ifndef EIGEN_TEST_EVALUATORS - , - CoeffReadCost = traits<_DiagonalVectorType>::CoeffReadCost -#endif }; }; } @@ -347,7 +307,6 @@ bool MatrixBase::isDiagonal(const RealScalar& prec) const return true; } -#ifdef EIGEN_ENABLE_EVALUATORS namespace internal { template<> struct storage_kind_to_shape { typedef DiagonalShape Shape; }; @@ -368,7 +327,6 @@ struct Assignment }; } // namespace internal -#endif // EIGEN_ENABLE_EVALUATORS } // end namespace Eigen diff --git a/Eigen/src/Core/DiagonalProduct.h b/Eigen/src/Core/DiagonalProduct.h index c6dafdddc..d372b938f 100644 --- a/Eigen/src/Core/DiagonalProduct.h +++ b/Eigen/src/Core/DiagonalProduct.h @@ -13,122 +13,6 @@ namespace Eigen { -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { -template -struct traits > - : traits -{ - typedef typename scalar_product_traits::ReturnType Scalar; - enum { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - -#ifndef EIGEN_TEST_EVALUATORS - _StorageOrder = MatrixType::Flags & RowMajorBit ? RowMajor : ColMajor, - _ScalarAccessOnDiag = !((int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheLeft) - ||(int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheRight)), - _SameTypes = is_same::value, - // FIXME currently we need same types, but in the future the next rule should be the one - //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))), - _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))), - _LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0, - Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? 
PacketAccessBit : 0) | AlignedBit, //(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit), - CoeffReadCost = NumTraits::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost -#else - Flags = RowMajorBit & (unsigned int)(MatrixType::Flags) -#endif - }; -}; -} - -template -class DiagonalProduct : internal::no_assignment_operator, - public MatrixBase > -{ - public: - - typedef MatrixBase Base; - EIGEN_DENSE_PUBLIC_INTERFACE(DiagonalProduct) - - inline DiagonalProduct(const MatrixType& matrix, const DiagonalType& diagonal) - : m_matrix(matrix), m_diagonal(diagonal) - { - eigen_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols())); - } - - EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); } - EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); } - - EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const - { - return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col); - } - - EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const - { - enum { - StorageOrder = int(MatrixType::Flags) & RowMajorBit ? RowMajor : ColMajor - }; - return coeff(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx); - } - - template - EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const - { - enum { - StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor - }; - const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col; - return packet_impl(row,col,indexInDiagonalVector,typename internal::conditional< - ((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft) - ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), internal::true_type, internal::false_type>::type()); - } - - template - EIGEN_STRONG_INLINE PacketScalar packet(Index idx) const - { - enum { - StorageOrder = int(MatrixType::Flags) & RowMajorBit ? RowMajor : ColMajor - }; - return packet(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx); - } - - protected: - template - EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::true_type) const - { - return internal::pmul(m_matrix.template packet(row, col), - internal::pset1(m_diagonal.diagonal().coeff(id))); - } - - template - EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::false_type) const - { - enum { - InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime, - DiagonalVectorPacketLoadMode = (LoadMode == Aligned && (((InnerSize%16) == 0) || (int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit)==AlignedBit) ? Aligned : Unaligned) - }; - return internal::pmul(m_matrix.template packet(row, col), - m_diagonal.diagonal().template packet(id)); - } - - typename MatrixType::Nested m_matrix; - typename DiagonalType::Nested m_diagonal; -}; - -/** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal. - */ -template -template -inline const DiagonalProduct -MatrixBase::operator*(const DiagonalBase &a_diagonal) const -{ - return DiagonalProduct(derived(), a_diagonal.derived()); -} -#else // EIGEN_TEST_EVALUATORS /** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal. 
*/ template @@ -138,7 +22,6 @@ MatrixBase::operator*(const DiagonalBase &a_diagonal) { return Product(derived(),a_diagonal.derived()); } -#endif // EIGEN_TEST_EVALUATORS } // end namespace Eigen diff --git a/Eigen/src/Core/Dot.h b/Eigen/src/Core/Dot.h index d6441c6a5..68e9c2660 100644 --- a/Eigen/src/Core/Dot.h +++ b/Eigen/src/Core/Dot.h @@ -113,13 +113,7 @@ template inline const typename MatrixBase::PlainObject MatrixBase::normalized() const { -#ifndef EIGEN_TEST_EVALUATORS - typedef typename internal::nested::type Nested; - typedef typename internal::remove_reference::type _Nested; -#else typedef typename internal::nested_eval::type _Nested; -// typedef typename internal::remove_reference::type _Nested; -#endif // EIGEN_TEST_EVALUATORS _Nested n(derived()); return n / n.norm(); } @@ -211,13 +205,8 @@ template bool MatrixBase::isOrthogonal (const MatrixBase& other, const RealScalar& prec) const { -#ifndef EIGEN_TEST_EVALUATORS - typename internal::nested::type nested(derived()); - typename internal::nested::type otherNested(other.derived()); -#else typename internal::nested_eval::type nested(derived()); typename internal::nested_eval::type otherNested(other.derived()); -#endif return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm(); } diff --git a/Eigen/src/Core/EigenBase.h b/Eigen/src/Core/EigenBase.h index 986e2a196..52b66e6dc 100644 --- a/Eigen/src/Core/EigenBase.h +++ b/Eigen/src/Core/EigenBase.h @@ -121,11 +121,7 @@ template template Derived& DenseBase::operator=(const EigenBase &other) { -#ifndef EIGEN_TEST_EVALUATORS - other.derived().evalTo(derived()); -#else call_assignment(derived(), other.derived()); -#endif return derived(); } @@ -133,11 +129,7 @@ template template Derived& DenseBase::operator+=(const EigenBase &other) { -#ifndef EIGEN_TEST_EVALUATORS - other.derived().addTo(derived()); -#else call_assignment(derived(), other.derived(), internal::add_assign_op()); -#endif return derived(); } @@ -145,11 +137,7 @@ template template Derived& DenseBase::operator-=(const EigenBase &other) { -#ifndef EIGEN_TEST_EVALUATORS - other.derived().subTo(derived()); -#else call_assignment(derived(), other.derived(), internal::sub_assign_op()); -#endif return derived(); } diff --git a/Eigen/src/Core/Fuzzy.h b/Eigen/src/Core/Fuzzy.h index 9c8d10683..8cd069a0d 100644 --- a/Eigen/src/Core/Fuzzy.h +++ b/Eigen/src/Core/Fuzzy.h @@ -23,13 +23,8 @@ struct isApprox_selector static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) { EIGEN_USING_STD_MATH(min); -#ifdef EIGEN_TEST_EVALUATORS typename internal::nested_eval::type nested(x); typename internal::nested_eval::type otherNested(y); -#else - typename internal::nested::type nested(x); - typename internal::nested::type otherNested(y); -#endif return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum()); } }; diff --git a/Eigen/src/Core/GeneralProduct.h b/Eigen/src/Core/GeneralProduct.h index abbf69549..e05ff8dce 100644 --- a/Eigen/src/Core/GeneralProduct.h +++ b/Eigen/src/Core/GeneralProduct.h @@ -13,31 +13,6 @@ namespace Eigen { -#ifndef EIGEN_TEST_EVALUATORS -/** \class GeneralProduct - * \ingroup Core_Module - * - * \brief Expression of the product of two general matrices or vectors - * - * \param LhsNested the type used to store the left-hand side - * \param RhsNested the type used to store the right-hand side - * \param ProductMode the type of the product - * - * This class 
represents an expression of the product of two general matrices. - * We call a general matrix, a dense matrix with full storage. For instance, - * This excludes triangular, selfadjoint, and sparse matrices. - * It is the return type of the operator* between general matrices. Its template - * arguments are determined automatically by ProductReturnType. Therefore, - * GeneralProduct should never be used direclty. To determine the result type of a - * function which involves a matrix product, use ProductReturnType::Type. - * - * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase&) - */ -template::value> -class GeneralProduct; -#endif // EIGEN_TEST_EVALUATORS - - enum { Large = 2, Small = 3 @@ -156,56 +131,6 @@ template<> struct product_type_selector { enum } // end namespace internal -#ifndef EIGEN_TEST_EVALUATORS -/** \class ProductReturnType - * \ingroup Core_Module - * - * \brief Helper class to get the correct and optimized returned type of operator* - * - * \param Lhs the type of the left-hand side - * \param Rhs the type of the right-hand side - * \param ProductMode the type of the product (determined automatically by internal::product_mode) - * - * This class defines the typename Type representing the optimized product expression - * between two matrix expressions. In practice, using ProductReturnType::Type - * is the recommended way to define the result type of a function returning an expression - * which involve a matrix product. The class Product should never be - * used directly. - * - * \sa class Product, MatrixBase::operator*(const MatrixBase&) - */ -template -struct ProductReturnType -{ - // TODO use the nested type to reduce instanciations ???? -// typedef typename internal::nested::type LhsNested; -// typedef typename internal::nested::type RhsNested; - - typedef GeneralProduct Type; -}; - -template -struct ProductReturnType -{ - typedef typename internal::nested::type >::type LhsNested; - typedef typename internal::nested::type >::type RhsNested; - typedef CoeffBasedProduct Type; -}; - -template -struct ProductReturnType -{ - typedef typename internal::nested::type >::type LhsNested; - typedef typename internal::nested::type >::type RhsNested; - typedef CoeffBasedProduct Type; -}; - -// this is a workaround for sun CC -template -struct LazyProductReturnType : public ProductReturnType -{}; -#endif - /*********************************************************************** * Implementation of Inner Vector Vector Product ***********************************************************************/ @@ -216,124 +141,11 @@ struct LazyProductReturnType : public ProductReturnType with: operator=(Scalar x); -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct traits > - : traits::ReturnType,1,1> > -{}; - -} - -template -class GeneralProduct - : internal::no_assignment_operator, - public Matrix::ReturnType,1,1> -{ - typedef Matrix::ReturnType,1,1> Base; - public: - GeneralProduct(const Lhs& lhs, const Rhs& rhs) - { - EIGEN_STATIC_ASSERT((internal::is_same::value), - YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - - Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum(); - } - - /** Convertion to scalar */ - operator const typename Base::Scalar() const { - return Base::coeff(0,0); - } -}; -#endif // EIGEN_TEST_EVALUATORS /*********************************************************************** * Implementation of Outer Vector Vector Product 
***********************************************************************/ -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -// Column major -template -EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest& dest, const Func& func, const false_type&) -{ - typedef typename Dest::Index Index; - // FIXME make sure lhs is sequentially stored - // FIXME not very good if rhs is real and lhs complex while alpha is real too - const Index cols = dest.cols(); - for (Index j=0; j -EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest& dest, const Func& func, const true_type&) { - typedef typename Dest::Index Index; - // FIXME make sure rhs is sequentially stored - // FIXME not very good if lhs is real and rhs complex while alpha is real too - const Index rows = dest.rows(); - for (Index i=0; i -struct traits > - : traits, Lhs, Rhs> > -{}; - -} - -template -class GeneralProduct - : public ProductBase, Lhs, Rhs> -{ - template struct IsRowMajor : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {}; - - public: - EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) - - GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) - { - EIGEN_STATIC_ASSERT((internal::is_same::value), - YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - } - - struct set { template void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } }; - struct add { template void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } }; - struct sub { template void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } }; - struct adds { - Scalar m_scale; - adds(const Scalar& s) : m_scale(s) {} - template void operator()(const Dst& dst, const Src& src) const { - dst.const_cast_derived() += m_scale * src; - } - }; - - template - inline void evalTo(Dest& dest) const { - internal::outer_product_selector_run(*this, dest, set(), IsRowMajor()); - } - - template - inline void addTo(Dest& dest) const { - internal::outer_product_selector_run(*this, dest, add(), IsRowMajor()); - } - - template - inline void subTo(Dest& dest) const { - internal::outer_product_selector_run(*this, dest, sub(), IsRowMajor()); - } - - template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const - { - internal::outer_product_selector_run(*this, dest, adds(alpha), IsRowMajor()); - } -}; - -#endif // EIGEN_TEST_EVALUATORS - /*********************************************************************** * Implementation of General Matrix Vector Product ***********************************************************************/ @@ -347,50 +159,11 @@ class GeneralProduct */ namespace internal { -#ifndef EIGEN_TEST_EVALUATORS -template -struct traits > - : traits, Lhs, Rhs> > -{}; -template -struct gemv_selector; -#endif -#ifdef EIGEN_ENABLE_EVALUATORS template struct gemv_dense_sense_selector; -#endif } // end namespace internal -#ifndef EIGEN_TEST_EVALUATORS -template -class GeneralProduct - : public ProductBase, Lhs, Rhs> -{ - public: - EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) - - typedef typename Lhs::Scalar LhsScalar; - typedef typename Rhs::Scalar RhsScalar; - - GeneralProduct(const Lhs& a_lhs, const Rhs& a_rhs) : Base(a_lhs,a_rhs) - { -// EIGEN_STATIC_ASSERT((internal::is_same::value), -// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - } 
- - enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight }; - typedef typename internal::conditional::type MatrixType; - - template void scaleAndAddTo(Dest& dst, const Scalar& alpha) const - { - eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols()); - internal::gemv_selector::HasUsableDirectAccess)>::run(*this, dst, alpha); - } -}; -#endif - namespace internal { template struct gemv_static_vector_if; @@ -429,177 +202,6 @@ struct gemv_static_vector_if #endif }; -#ifndef EIGEN_TEST_EVALUATORS - -// The vector is on the left => transposition -template -struct gemv_selector -{ - template - static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha) - { - Transpose destT(dest); - enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor }; - gemv_selector - ::run(GeneralProduct,Transpose, GemvProduct> - (prod.rhs().transpose(), prod.lhs().transpose()), destT, alpha); - } -}; - -template<> struct gemv_selector -{ - template - static inline void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha) - { - typedef typename ProductType::Index Index; - typedef typename ProductType::LhsScalar LhsScalar; - typedef typename ProductType::RhsScalar RhsScalar; - typedef typename ProductType::Scalar ResScalar; - typedef typename ProductType::RealScalar RealScalar; - typedef typename ProductType::ActualLhsType ActualLhsType; - typedef typename ProductType::ActualRhsType ActualRhsType; - typedef typename ProductType::LhsBlasTraits LhsBlasTraits; - typedef typename ProductType::RhsBlasTraits RhsBlasTraits; - typedef Map, Aligned> MappedDest; - - ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs()); - ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs()); - - ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) - * RhsBlasTraits::extractScalarFactor(prod.rhs()); - - enum { - // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 - // on, the other hand it is good for the cache to pack the vector anyways... - EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1, - ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex), - MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal - }; - - gemv_static_vector_if static_dest; - - bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0)); - bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible; - - RhsScalar compatibleAlpha = get_factor::run(actualAlpha); - - ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(), - evalToDest ? 
dest.data() : static_dest.data()); - - if(!evalToDest) - { - #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN - Index size = dest.size(); - EIGEN_DENSE_STORAGE_CTOR_PLUGIN - #endif - if(!alphaIsCompatible) - { - MappedDest(actualDestPtr, dest.size()).setZero(); - compatibleAlpha = RhsScalar(1); - } - else - MappedDest(actualDestPtr, dest.size()) = dest; - } - - general_matrix_vector_product - ::run( - actualLhs.rows(), actualLhs.cols(), - actualLhs.data(), actualLhs.outerStride(), - actualRhs.data(), actualRhs.innerStride(), - actualDestPtr, 1, - compatibleAlpha); - - if (!evalToDest) - { - if(!alphaIsCompatible) - dest += actualAlpha * MappedDest(actualDestPtr, dest.size()); - else - dest = MappedDest(actualDestPtr, dest.size()); - } - } -}; - -template<> struct gemv_selector -{ - template - static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha) - { - typedef typename ProductType::LhsScalar LhsScalar; - typedef typename ProductType::RhsScalar RhsScalar; - typedef typename ProductType::Scalar ResScalar; - typedef typename ProductType::Index Index; - typedef typename ProductType::ActualLhsType ActualLhsType; - typedef typename ProductType::ActualRhsType ActualRhsType; - typedef typename ProductType::_ActualRhsType _ActualRhsType; - typedef typename ProductType::LhsBlasTraits LhsBlasTraits; - typedef typename ProductType::RhsBlasTraits RhsBlasTraits; - - typename add_const::type actualLhs = LhsBlasTraits::extract(prod.lhs()); - typename add_const::type actualRhs = RhsBlasTraits::extract(prod.rhs()); - - ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) - * RhsBlasTraits::extractScalarFactor(prod.rhs()); - - enum { - // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 - // on, the other hand it is good for the cache to pack the vector anyways... - DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1 - }; - - gemv_static_vector_if static_rhs; - - ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(), - DirectlyUseRhs ? 
const_cast(actualRhs.data()) : static_rhs.data()); - - if(!DirectlyUseRhs) - { - #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN - Index size = actualRhs.size(); - EIGEN_DENSE_STORAGE_CTOR_PLUGIN - #endif - Map(actualRhsPtr, actualRhs.size()) = actualRhs; - } - - general_matrix_vector_product - ::run( - actualLhs.rows(), actualLhs.cols(), - actualLhs.data(), actualLhs.outerStride(), - actualRhsPtr, 1, - dest.data(), dest.innerStride(), - actualAlpha); - } -}; - -template<> struct gemv_selector -{ - template - static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha) - { - typedef typename Dest::Index Index; - // TODO makes sure dest is sequentially stored in memory, otherwise use a temp - const Index size = prod.rhs().rows(); - for(Index k=0; k struct gemv_selector -{ - template - static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha) - { - typedef typename Dest::Index Index; - // TODO makes sure rhs is sequentially stored in memory, otherwise use a temp - const Index rows = prod.rows(); - for(Index i=0; i transposition template struct gemv_dense_sense_selector @@ -767,8 +369,6 @@ template<> struct gemv_dense_sense_selector } }; -#endif // EIGEN_ENABLE_EVALUATORS - } // end namespace internal /*************************************************************************** @@ -783,7 +383,6 @@ template<> struct gemv_dense_sense_selector */ #ifndef __CUDACC__ -#ifdef EIGEN_TEST_EVALUATORS template template inline const Product @@ -814,37 +413,6 @@ MatrixBase::operator*(const MatrixBase &other) const return Product(derived(), other.derived()); } -#else // EIGEN_TEST_EVALUATORS -template -template -inline const typename ProductReturnType::Type -MatrixBase::operator*(const MatrixBase &other) const -{ - // A note regarding the function declaration: In MSVC, this function will sometimes - // not be inlined since DenseStorage is an unwindable object for dynamic - // matrices and product types are holding a member to store the result. - // Thus it does not help tagging this function with EIGEN_STRONG_INLINE. 
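(Illustrative sketch, not part of the patch: with the old GeneralProduct/gemv_selector path removed, MatrixBase::operator* returns a Product<> expression that is evaluated when assigned, and matrix-vector products are dispatched by gemv_dense_sense_selector; the static assertions in the following hunk still point the "lost user" to dot() and cwiseProduct(). Public-API usage remains:)

    #include <Eigen/Dense>
    using namespace Eigen;

    int main()
    {
      MatrixXd A = MatrixXd::Random(4, 3), B = MatrixXd::Random(3, 5);
      VectorXd v1 = VectorXd::Random(3), v2 = VectorXd::Random(3);

      MatrixXd C = A * B;                // Product<> expression, evaluated when assigned to C
      VectorXd y = A * v2;               // matrix-vector product, handled by gemv_dense_sense_selector
      double   d = v1.dot(v2);           // dot product: use dot(), not operator*
      VectorXd w = v1.cwiseProduct(v2);  // coefficient-wise product
      return 0;
    }
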
- enum { - ProductIsValid = Derived::ColsAtCompileTime==Dynamic - || OtherDerived::RowsAtCompileTime==Dynamic - || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), - AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, - SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) - }; - // note to the lost user: - // * for a dot product use: v1.dot(v2) - // * for a coeff-wise product use: v1.cwiseProduct(v2) - EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), - INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) - EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), - INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) - EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) -#ifdef EIGEN_DEBUG_PRODUCT - internal::product_type::debug(); -#endif - return typename ProductReturnType::Type(derived(), other.derived()); -} -#endif // EIGEN_TEST_EVALUATORS #endif // __CUDACC__ @@ -859,7 +427,6 @@ MatrixBase::operator*(const MatrixBase &other) const * * \sa operator*(const MatrixBase&) */ -#ifdef EIGEN_TEST_EVALUATORS template template const Product @@ -883,31 +450,6 @@ MatrixBase::lazyProduct(const MatrixBase &other) const return Product(derived(), other.derived()); } -#else // EIGEN_TEST_EVALUATORS -template -template -const typename LazyProductReturnType::Type -MatrixBase::lazyProduct(const MatrixBase &other) const -{ - enum { - ProductIsValid = Derived::ColsAtCompileTime==Dynamic - || OtherDerived::RowsAtCompileTime==Dynamic - || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), - AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, - SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) - }; - // note to the lost user: - // * for a dot product use: v1.dot(v2) - // * for a coeff-wise product use: v1.cwiseProduct(v2) - EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), - INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) - EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), - INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) - EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) - - return typename LazyProductReturnType::Type(derived(), other.derived()); -} -#endif // EIGEN_TEST_EVALUATORS } // end namespace Eigen diff --git a/Eigen/src/Core/Inverse.h b/Eigen/src/Core/Inverse.h index 84abd258e..5cfa7e50c 100644 --- a/Eigen/src/Core/Inverse.h +++ b/Eigen/src/Core/Inverse.h @@ -12,8 +12,6 @@ namespace Eigen { -#ifdef EIGEN_TEST_EVALUATORS - // TODO move the general declaration in Core, and rename this file DenseInverseImpl.h, or something like this... template class InverseImpl; @@ -127,8 +125,6 @@ protected: } // end namespace internal -#endif - } // end namespace Eigen #endif // EIGEN_INVERSE_H diff --git a/Eigen/src/Core/Map.h b/Eigen/src/Core/Map.h index 7dfdc3d59..87c1787bf 100644 --- a/Eigen/src/Core/Map.h +++ b/Eigen/src/Core/Map.h @@ -80,26 +80,8 @@ struct traits > ? 
int(PlainObjectType::OuterStrideAtCompileTime) : int(StrideType::OuterStrideAtCompileTime), IsAligned = bool(EIGEN_ALIGN) && ((int(MapOptions)&Aligned)==Aligned), -#ifndef EIGEN_TEST_EVALUATORS - HasNoInnerStride = InnerStrideAtCompileTime == 1, - HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0, - HasNoStride = HasNoInnerStride && HasNoOuterStride, - IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic, - KeepsPacketAccess = bool(HasNoInnerStride) - && ( bool(IsDynamicSize) - || HasNoOuterStride - || ( OuterStrideAtCompileTime!=Dynamic - && ((static_cast(sizeof(Scalar))*OuterStrideAtCompileTime)%EIGEN_ALIGN_BYTES)==0 ) ), - Flags0 = TraitsBase::Flags & (~NestByRefBit), - Flags1 = IsAligned ? (int(Flags0) | AlignedBit) : (int(Flags0) & ~AlignedBit), - Flags2 = (bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime)) - ? int(Flags1) : int(Flags1 & ~LinearAccessBit), - Flags3 = is_lvalue::value ? int(Flags2) : (int(Flags2) & ~LvalueBit), - Flags = KeepsPacketAccess ? int(Flags3) : (int(Flags3) & ~PacketAccessBit) -#else Flags0 = TraitsBase::Flags & (~NestByRefBit), Flags = is_lvalue::value ? int(Flags0) : (int(Flags0) & ~LvalueBit) -#endif }; private: enum { Options }; // Expressions don't have Options diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h index 591ea26fb..6d3b344e8 100644 --- a/Eigen/src/Core/MapBase.h +++ b/Eigen/src/Core/MapBase.h @@ -11,15 +11,9 @@ #ifndef EIGEN_MAPBASE_H #define EIGEN_MAPBASE_H -#ifndef EIGEN_TEST_EVALUATORS -#define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \ - EIGEN_STATIC_ASSERT((int(internal::traits::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \ - YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT) -#else #define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \ EIGEN_STATIC_ASSERT((int(internal::evaluator::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \ YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT) -#endif namespace Eigen { @@ -167,15 +161,7 @@ template class MapBase EIGEN_DEVICE_FUNC void checkSanity() const { -#ifndef EIGEN_TEST_EVALUATORS - // moved to evaluator - EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(internal::traits::Flags&PacketAccessBit, - internal::inner_stride_at_compile_time::ret==1), - PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1); - eigen_assert(EIGEN_IMPLIES(internal::traits::Flags&AlignedBit, (size_t(m_data) % EIGEN_ALIGN_BYTES) == 0) && "data is not aligned"); -#else eigen_assert(EIGEN_IMPLIES(internal::traits::IsAligned, (size_t(m_data) % EIGEN_ALIGN_BYTES) == 0) && "data is not aligned"); -#endif } PointerType m_data; diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h index 1daaabb07..8a5821548 100644 --- a/Eigen/src/Core/Matrix.h +++ b/Eigen/src/Core/Matrix.h @@ -115,12 +115,8 @@ struct traits > MaxRowsAtCompileTime = _MaxRows, MaxColsAtCompileTime = _MaxCols, Flags = compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret, -#ifndef EIGEN_TEST_EVALUATORS - CoeffReadCost = NumTraits::ReadCost, -#else // FIXME, the following flag in only used to define NeedsToAlign in PlainObjectBase EvaluatorFlags = compute_matrix_evaluator_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret, -#endif Options = _Options, InnerStrideAtCompileTime = 1, OuterStrideAtCompileTime = (Options&RowMajor) ? 
ColsAtCompileTime : RowsAtCompileTime diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h index bb49b9e84..9dbbd6fb5 100644 --- a/Eigen/src/Core/MatrixBase.h +++ b/Eigen/src/Core/MatrixBase.h @@ -66,9 +66,6 @@ template class MatrixBase using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; -#ifndef EIGEN_TEST_EVALUATORS - using Base::CoeffReadCost; -#endif using Base::derived; using Base::const_cast_derived; @@ -188,25 +185,15 @@ template class MatrixBase { return this->lazyProduct(other); } #else -#ifdef EIGEN_TEST_EVALUATORS template const Product operator*(const MatrixBase &other) const; -#else - template - const typename ProductReturnType::Type - operator*(const MatrixBase &other) const; -#endif #endif template EIGEN_DEVICE_FUNC -#ifdef EIGEN_TEST_EVALUATORS const Product -#else - const typename LazyProductReturnType::Type -#endif lazyProduct(const MatrixBase &other) const; template @@ -218,17 +205,10 @@ template class MatrixBase template void applyOnTheRight(const EigenBase& other); -#ifndef EIGEN_TEST_EVALUATORS - template - EIGEN_DEVICE_FUNC - const DiagonalProduct - operator*(const DiagonalBase &diagonal) const; -#else // EIGEN_TEST_EVALUATORS template EIGEN_DEVICE_FUNC const Product operator*(const DiagonalBase &diagonal) const; -#endif // EIGEN_TEST_EVALUATORS template EIGEN_DEVICE_FUNC @@ -347,19 +327,12 @@ template class MatrixBase NoAlias noalias(); -#ifndef EIGEN_TEST_EVALUATORS - inline const ForceAlignedAccess forceAlignedAccess() const; - inline ForceAlignedAccess forceAlignedAccess(); - template inline typename internal::add_const_on_value_type,Derived&>::type>::type forceAlignedAccessIf() const; - template inline typename internal::conditional,Derived&>::type forceAlignedAccessIf(); -#else // TODO forceAlignedAccess is temporarly disabled // Need to find a nicer workaround. 
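(Illustrative sketch, not part of the patch: the MatrixBase declarations above keep the same user-level API; products with diagonal expressions and lazyProduct() now also return Product<> expressions. For instance:)

    #include <Eigen/Dense>
    using namespace Eigen;

    int main()
    {
      MatrixXd A = MatrixXd::Random(4, 4);
      VectorXd d = VectorXd::Random(4);

      MatrixXd B = A * d.asDiagonal();   // product of a dense and a diagonal expression
      MatrixXd C(4, 4);
      C.noalias() = A.lazyProduct(A);    // coefficient-based product, evaluated directly into C
      return 0;
    }
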
inline const Derived& forceAlignedAccess() const { return derived(); } inline Derived& forceAlignedAccess() { return derived(); } template inline const Derived& forceAlignedAccessIf() const { return derived(); } template inline Derived& forceAlignedAccessIf() { return derived(); } -#endif Scalar trace() const; @@ -382,13 +355,9 @@ template class MatrixBase const PartialPivLU lu() const; - #ifdef EIGEN_TEST_EVALUATORS EIGEN_DEVICE_FUNC const Inverse inverse() const; - #else - EIGEN_DEVICE_FUNC - const internal::inverse_impl inverse() const; - #endif + template void computeInverseAndDetWithCheck( ResultType& inverse, diff --git a/Eigen/src/Core/NoAlias.h b/Eigen/src/Core/NoAlias.h index fe6dded60..097c9c062 100644 --- a/Eigen/src/Core/NoAlias.h +++ b/Eigen/src/Core/NoAlias.h @@ -35,7 +35,6 @@ class NoAlias NoAlias(ExpressionType& expression) : m_expression(expression) {} -#ifdef EIGEN_TEST_EVALUATORS template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase& other) @@ -59,68 +58,6 @@ class NoAlias call_assignment_no_alias(m_expression, other.derived(), internal::sub_assign_op()); return m_expression; } - -#else - - /** Behaves like MatrixBase::lazyAssign(other) - * \sa MatrixBase::lazyAssign() */ - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase& other) - { return internal::assign_selector::run(m_expression,other.derived()); } - - /** \sa MatrixBase::operator+= */ - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase& other) - { - typedef SelfCwiseBinaryOp, ExpressionType, OtherDerived> SelfAdder; - SelfAdder tmp(m_expression); - typedef typename internal::nested::type OtherDerivedNested; - typedef typename internal::remove_all::type _OtherDerivedNested; - internal::assign_selector::run(tmp,OtherDerivedNested(other.derived())); - return m_expression; - } - - /** \sa MatrixBase::operator-= */ - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase& other) - { - typedef SelfCwiseBinaryOp, ExpressionType, OtherDerived> SelfAdder; - SelfAdder tmp(m_expression); - typedef typename internal::nested::type OtherDerivedNested; - typedef typename internal::remove_all::type _OtherDerivedNested; - internal::assign_selector::run(tmp,OtherDerivedNested(other.derived())); - return m_expression; - } - -#ifndef EIGEN_PARSED_BY_DOXYGEN - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE ExpressionType& operator+=(const ProductBase& other) - { other.derived().addTo(m_expression); return m_expression; } - - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE ExpressionType& operator-=(const ProductBase& other) - { other.derived().subTo(m_expression); return m_expression; } - - template - EIGEN_STRONG_INLINE ExpressionType& operator+=(const CoeffBasedProduct& other) - { return m_expression.derived() += CoeffBasedProduct(other.lhs(), other.rhs()); } - - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE ExpressionType& operator-=(const CoeffBasedProduct& other) - { return m_expression.derived() -= CoeffBasedProduct(other.lhs(), other.rhs()); } - - template - ExpressionType& operator=(const ReturnByValue& func) - { return m_expression = func; } -#endif - -#endif EIGEN_DEVICE_FUNC ExpressionType& expression() const diff --git a/Eigen/src/Core/PermutationMatrix.h b/Eigen/src/Core/PermutationMatrix.h index 31e0697a1..200518173 100644 --- a/Eigen/src/Core/PermutationMatrix.h +++ b/Eigen/src/Core/PermutationMatrix.h @@ -61,9 +61,6 @@ class PermutationBase 
: public EigenBase typedef typename Traits::IndicesType IndicesType; enum { Flags = Traits::Flags, -#ifndef EIGEN_TEST_EVALUATORS - CoeffReadCost = Traits::CoeffReadCost, -#endif RowsAtCompileTime = Traits::RowsAtCompileTime, ColsAtCompileTime = Traits::ColsAtCompileTime, MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime, @@ -291,9 +288,7 @@ class PermutationMatrix : public PermutationBase Traits; public: -#ifdef EIGEN_TEST_EVALUATORS typedef const PermutationMatrix& Nested; -#endif #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename Traits::IndicesType IndicesType; @@ -484,18 +479,9 @@ struct traits > enum { RowsAtCompileTime = _IndicesType::SizeAtCompileTime, ColsAtCompileTime = _IndicesType::SizeAtCompileTime, -#ifdef EIGEN_TEST_EVALUATORS MaxRowsAtCompileTime = IndicesType::MaxSizeAtCompileTime, MaxColsAtCompileTime = IndicesType::MaxSizeAtCompileTime, -#else - MaxRowsAtCompileTime = IndicesType::MaxRowsAtCompileTime, // is this a bug in Eigen 2.2 ? - MaxColsAtCompileTime = IndicesType::MaxColsAtCompileTime, -#endif Flags = 0 -#ifndef EIGEN_TEST_EVALUATORS - , - CoeffReadCost = _IndicesType::CoeffReadCost -#endif }; }; } @@ -524,7 +510,6 @@ class PermutationWrapper : public PermutationBase &permutation, (permutation.derived(), matrix.derived()); } -#else // EIGEN_TEST_EVALUATORS - -/** \returns the matrix with the permutation applied to the columns. - */ -template -inline const internal::permut_matrix_product_retval -operator*(const MatrixBase& matrix, - const PermutationBase &permutation) -{ - return internal::permut_matrix_product_retval - - (permutation.derived(), matrix.derived()); -} - -/** \returns the matrix with the permutation applied to the rows. - */ -template -inline const internal::permut_matrix_product_retval - -operator*(const PermutationBase &permutation, - const MatrixBase& matrix) -{ - return internal::permut_matrix_product_retval - - (permutation.derived(), matrix.derived()); -} - -#endif // EIGEN_TEST_EVALUATORS - namespace internal { template @@ -682,9 +638,6 @@ class Transpose > typedef typename Derived::DenseMatrixType DenseMatrixType; enum { Flags = Traits::Flags, -#ifndef EIGEN_TEST_EVALUATORS - CoeffReadCost = Traits::CoeffReadCost, -#endif RowsAtCompileTime = Traits::RowsAtCompileTime, ColsAtCompileTime = Traits::ColsAtCompileTime, MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime, @@ -713,8 +666,6 @@ class Transpose > DenseMatrixType toDenseMatrix() const { return *this; } -#ifdef EIGEN_TEST_EVALUATORS - /** \returns the matrix with the inverse permutation applied to the columns. */ template friend @@ -733,28 +684,6 @@ class Transpose > return Product(*this, matrix.derived()); } -#else // EIGEN_TEST_EVALUATORS - - /** \returns the matrix with the inverse permutation applied to the columns. - */ - template friend - inline const internal::permut_matrix_product_retval - operator*(const MatrixBase& matrix, const Transpose& trPerm) - { - return internal::permut_matrix_product_retval(trPerm.m_permutation, matrix.derived()); - } - - /** \returns the matrix with the inverse permutation applied to the rows. 
- */ - template - inline const internal::permut_matrix_product_retval - operator*(const MatrixBase& matrix) const - { - return internal::permut_matrix_product_retval(m_permutation, matrix.derived()); - } - -#endif // EIGEN_TEST_EVALUATORS - const PermutationType& nestedPermutation() const { return m_permutation; } protected: @@ -767,7 +696,6 @@ const PermutationWrapper MatrixBase::asPermutation() con return derived(); } -#ifdef EIGEN_TEST_EVALUATORS namespace internal { // TODO currently a permutation matrix expression has the form PermutationMatrix or PermutationWrapper @@ -799,7 +727,6 @@ struct evaluator_traits > > template<> struct AssignmentKind { typedef EigenBase2EigenBase Kind; }; } // end namespace internal -#endif // EIGEN_TEST_EVALUATORS } // end namespace Eigen diff --git a/Eigen/src/Core/PlainObjectBase.h b/Eigen/src/Core/PlainObjectBase.h index 3637b6256..11aec1552 100644 --- a/Eigen/src/Core/PlainObjectBase.h +++ b/Eigen/src/Core/PlainObjectBase.h @@ -128,11 +128,7 @@ class PlainObjectBase : public internal::dense_xpr_base::type DenseStorage m_storage; public: -#ifndef EIGEN_TEST_EVALUATORS - enum { NeedsToAlign = SizeAtCompileTime != Dynamic && (internal::traits::Flags & AlignedBit) != 0 }; -#else enum { NeedsToAlign = SizeAtCompileTime != Dynamic && (internal::traits::EvaluatorFlags & AlignedBit) != 0 }; -#endif EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) EIGEN_DEVICE_FUNC @@ -643,7 +639,6 @@ class PlainObjectBase : public internal::dense_xpr_base::type * * \internal */ -#ifdef EIGEN_TEST_EVALUATORS // aliasing is dealt once in internall::call_assignment // so at this stage we have to assume aliasing... and resising has to be done later. template @@ -654,23 +649,7 @@ class PlainObjectBase : public internal::dense_xpr_base::type return this->derived(); return this->derived(); } -#else - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE Derived& _set(const DenseBase& other) - { - _set_selector(other.derived(), typename internal::conditional(int(OtherDerived::Flags) & EvalBeforeAssigningBit), internal::true_type, internal::false_type>::type()); - return this->derived(); - } - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::true_type&) { _set_noalias(other.eval()); } - - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::false_type&) { _set_noalias(other); } -#endif /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which * is the case when creating a new matrix) so one can enforce lazy evaluation. * @@ -685,12 +664,8 @@ class PlainObjectBase : public internal::dense_xpr_base::type //_resize_to_match(other); // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because // it wouldn't allow to copy a row-vector into a column-vector. 
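A small sketch (illustrative only, assumptions: the matrix names are mine) of the behaviour the _set() / _set_noalias() split above is about: construction can never alias, so it may take the lazy path, while assignment has to assume aliasing, which is resolved later in internal::call_assignment.

#include <Eigen/Dense>
using namespace Eigen;

void aliasing_demo()
{
  MatrixXd m = MatrixXd::Random(3, 3);

  // Construction: the destination is brand new, aliasing is impossible,
  // so the copy can be evaluated lazily (the _set_noalias path).
  MatrixXd copy = m.transpose();

  // Assignment: m appears on both sides, so aliasing must be dealt with.
  // Either evaluate into a temporary explicitly...
  m = m.transpose().eval();
  // ...or use the dedicated in-place routine.
  m.transposeInPlace();
}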
-#ifdef EIGEN_TEST_EVALUATORS internal::call_assignment_no_alias(this->derived(), other.derived(), internal::assign_op()); return this->derived(); -#else - return internal::assign_selector::run(this->derived(), other.derived()); -#endif } template diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h index 6825873d5..ae64d5200 100644 --- a/Eigen/src/Core/Product.h +++ b/Eigen/src/Core/Product.h @@ -79,11 +79,6 @@ struct traits > // FIXME: only needed by GeneralMatrixMatrixTriangular InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime), -#ifndef EIGEN_TEST_EVALUATORS - // dummy, for evaluators unit test only - CoeffReadCost = Dynamic, -#endif - // The storage order is somewhat arbitrary here. The correct one will be determined through the evaluator. Flags = ( (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) || ((LhsTraits::Flags&NoPreferredStorageOrderBit) && (RhsTraits::Flags&RowMajorBit)) @@ -164,7 +159,6 @@ public: } // namespace internal -#ifdef EIGEN_TEST_EVALUATORS // Generic API dispatcher template class ProductImpl : public internal::generic_xpr_base, MatrixXpr, StorageKind>::type @@ -172,7 +166,6 @@ class ProductImpl : public internal::generic_xpr_base, M public: typedef typename internal::generic_xpr_base, MatrixXpr, StorageKind>::type Base; }; -#endif template class ProductImpl diff --git a/Eigen/src/Core/ProductBase.h b/Eigen/src/Core/ProductBase.h index 4b1fe356b..050343b2d 100644 --- a/Eigen/src/Core/ProductBase.h +++ b/Eigen/src/Core/ProductBase.h @@ -12,258 +12,6 @@ namespace Eigen { -#ifndef EIGEN_TEST_EVALUATORS - -/** \class ProductBase - * \ingroup Core_Module - * - */ - -namespace internal { -template -struct traits > -{ - typedef MatrixXpr XprKind; - typedef typename remove_all<_Lhs>::type Lhs; - typedef typename remove_all<_Rhs>::type Rhs; - typedef typename scalar_product_traits::ReturnType Scalar; - typedef typename product_promote_storage_type::StorageKind, - typename traits::StorageKind, - 0>::ret StorageKind; - typedef typename promote_index_type::Index, - typename traits::Index>::type Index; - enum { - RowsAtCompileTime = traits::RowsAtCompileTime, - ColsAtCompileTime = traits::ColsAtCompileTime, - MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, - MaxColsAtCompileTime = traits::MaxColsAtCompileTime, - Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0) - | EvalBeforeNestingBit | EvalBeforeAssigningBit | NestByRefBit, - // Note that EvalBeforeNestingBit and NestByRefBit - // are not used in practice because nested is overloaded for products - CoeffReadCost = 0 // FIXME why is it needed ? 
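For reference, a hedged sketch of how the Product expression whose traits are edited above is consumed; A and B are placeholder matrices, and storing the expression in auto is shown only to make the lazy/eager distinction visible.

#include <Eigen/Dense>
using namespace Eigen;

void product_demo(const MatrixXd& A, const MatrixXd& B)
{
  // A * B is only an expression (Product<...>); nothing is computed yet.
  // Note: auto is safe here only because A and B outlive expr.
  auto expr = A * B;

  // Evaluation happens when the expression is assigned to a plain matrix.
  MatrixXd C = expr;

  // Or accumulate without creating a temporary for the product.
  C.noalias() += A * B;
}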
- }; -}; -} - -#define EIGEN_PRODUCT_PUBLIC_INTERFACE(Derived) \ - typedef ProductBase Base; \ - EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \ - typedef typename Base::LhsNested LhsNested; \ - typedef typename Base::_LhsNested _LhsNested; \ - typedef typename Base::LhsBlasTraits LhsBlasTraits; \ - typedef typename Base::ActualLhsType ActualLhsType; \ - typedef typename Base::_ActualLhsType _ActualLhsType; \ - typedef typename Base::RhsNested RhsNested; \ - typedef typename Base::_RhsNested _RhsNested; \ - typedef typename Base::RhsBlasTraits RhsBlasTraits; \ - typedef typename Base::ActualRhsType ActualRhsType; \ - typedef typename Base::_ActualRhsType _ActualRhsType; \ - using Base::m_lhs; \ - using Base::m_rhs; - -template -class ProductBase : public MatrixBase -{ - public: - typedef MatrixBase Base; - EIGEN_DENSE_PUBLIC_INTERFACE(ProductBase) - - typedef typename Lhs::Nested LhsNested; - typedef typename internal::remove_all::type _LhsNested; - typedef internal::blas_traits<_LhsNested> LhsBlasTraits; - typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; - typedef typename internal::remove_all::type _ActualLhsType; - typedef typename internal::traits::Scalar LhsScalar; - - typedef typename Rhs::Nested RhsNested; - typedef typename internal::remove_all::type _RhsNested; - typedef internal::blas_traits<_RhsNested> RhsBlasTraits; - typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; - typedef typename internal::remove_all::type _ActualRhsType; - typedef typename internal::traits::Scalar RhsScalar; - - // Diagonal of a product: no need to evaluate the arguments because they are going to be evaluated only once - typedef CoeffBasedProduct FullyLazyCoeffBaseProductType; - - public: - - typedef typename Base::PlainObject PlainObject; - - ProductBase(const Lhs& a_lhs, const Rhs& a_rhs) - : m_lhs(a_lhs), m_rhs(a_rhs) - { - eigen_assert(a_lhs.cols() == a_rhs.rows() - && "invalid matrix product" - && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); - } - - inline Index rows() const { return m_lhs.rows(); } - inline Index cols() const { return m_rhs.cols(); } - - template - inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); } - - template - inline void addTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(1)); } - - template - inline void subTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(-1)); } - - template - inline void scaleAndAddTo(Dest& dst, const Scalar& alpha) const { derived().scaleAndAddTo(dst,alpha); } - - const _LhsNested& lhs() const { return m_lhs; } - const _RhsNested& rhs() const { return m_rhs; } - - // Implicit conversion to the nested type (trigger the evaluation of the product) - operator const PlainObject& () const - { - m_result.resize(m_lhs.rows(), m_rhs.cols()); - derived().evalTo(m_result); - return m_result; - } - - const Diagonal diagonal() const - { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); } - - template - const Diagonal diagonal() const - { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); } - - const Diagonal diagonal(Index index) const - { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); } - - // restrict coeff accessors to 1x1 expressions. 
No need to care about mutators here since this isn't an Lvalue expression - typename Base::CoeffReturnType coeff(Index row, Index col) const - { - EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) - eigen_assert(this->rows() == 1 && this->cols() == 1); - Matrix result = *this; - return result.coeff(row,col); - } - - typename Base::CoeffReturnType coeff(Index i) const - { - EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) - eigen_assert(this->rows() == 1 && this->cols() == 1); - Matrix result = *this; - return result.coeff(i); - } - - const Scalar& coeffRef(Index row, Index col) const - { - EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) - eigen_assert(this->rows() == 1 && this->cols() == 1); - return derived().coeffRef(row,col); - } - - const Scalar& coeffRef(Index i) const - { - EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) - eigen_assert(this->rows() == 1 && this->cols() == 1); - return derived().coeffRef(i); - } - - protected: - - LhsNested m_lhs; - RhsNested m_rhs; - - mutable PlainObject m_result; -}; - -// here we need to overload the nested rule for products -// such that the nested type is a const reference to a plain matrix -namespace internal { -template -struct nested, N, PlainObject> -{ - typedef PlainObject const& type; -}; -} - -template -class ScaledProduct; - -// Note that these two operator* functions are not defined as member -// functions of ProductBase, because, otherwise we would have to -// define all overloads defined in MatrixBase. Furthermore, Using -// "using Base::operator*" would not work with MSVC. -// -// Also note that here we accept any compatible scalar types -template -const ScaledProduct -operator*(const ProductBase& prod, const typename Derived::Scalar& x) -{ return ScaledProduct(prod.derived(), x); } - -template -typename internal::enable_if::value, - const ScaledProduct >::type -operator*(const ProductBase& prod, const typename Derived::RealScalar& x) -{ return ScaledProduct(prod.derived(), x); } - - -template -const ScaledProduct -operator*(const typename Derived::Scalar& x,const ProductBase& prod) -{ return ScaledProduct(prod.derived(), x); } - -template -typename internal::enable_if::value, - const ScaledProduct >::type -operator*(const typename Derived::RealScalar& x,const ProductBase& prod) -{ return ScaledProduct(prod.derived(), x); } - -namespace internal { -template -struct traits > - : traits, - typename NestedProduct::_LhsNested, - typename NestedProduct::_RhsNested> > -{ - typedef typename traits::StorageKind StorageKind; -}; -} - -template -class ScaledProduct - : public ProductBase, - typename NestedProduct::_LhsNested, - typename NestedProduct::_RhsNested> -{ - public: - typedef ProductBase, - typename NestedProduct::_LhsNested, - typename NestedProduct::_RhsNested> Base; - typedef typename Base::Scalar Scalar; - typedef typename Base::PlainObject PlainObject; -// EIGEN_PRODUCT_PUBLIC_INTERFACE(ScaledProduct) - - ScaledProduct(const NestedProduct& prod, const Scalar& x) - : Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {} - - template - inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst, Scalar(1)); } - - template - inline void addTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(1)); } - - template - inline void subTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(-1)); } - - template - inline void scaleAndAddTo(Dest& dst, const Scalar& a_alpha) const { m_prod.derived().scaleAndAddTo(dst,a_alpha * m_alpha); } - - const Scalar& alpha() const { return m_alpha; } - - protected: - const NestedProduct& m_prod; - Scalar m_alpha; -}; - -#endif // 
EIGEN_TEST_EVALUATORS - /** \internal * Overloaded to perform an efficient C = (A*B).lazy() */ template diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h index 0aeae88bc..c6c355d43 100644 --- a/Eigen/src/Core/Redux.h +++ b/Eigen/src/Core/Redux.h @@ -69,11 +69,7 @@ public: #ifdef EIGEN_DEBUG_ASSIGN static void debug() { -#ifdef EIGEN_TEST_EVALUATORS std::cerr << "Xpr: " << typeid(typename Derived::XprType).name() << std::endl; -#else - std::cerr << "Xpr: " << typeid(Derived).name() << std::endl; -#endif std::cerr.setf(std::ios::hex, std::ios::basefield); EIGEN_DEBUG_VAR(Derived::Flags) std::cerr.unsetf(std::ios::hex); @@ -338,7 +334,6 @@ struct redux_impl } }; -#ifdef EIGEN_ENABLE_EVALUATORS // evaluator adaptor template class redux_evaluator @@ -395,7 +390,6 @@ protected: typename internal::evaluator::nestedType m_evaluator; const XprType &m_xpr; }; -#endif } // end namespace internal @@ -417,7 +411,6 @@ EIGEN_STRONG_INLINE typename internal::result_of::redux(const Func& func) const { eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix"); -#ifdef EIGEN_TEST_EVALUATORS // FIXME, eval_nest should be handled by redux_evaluator, however: // - it is currently difficult to provide the right Flags since they are still handled by the expressions @@ -433,13 +426,6 @@ DenseBase::redux(const Func& func) const ThisEvaluator thisEval(derived()); return internal::redux_impl::run(thisEval, func); - -#else - typedef typename internal::remove_all::type ThisNested; - - return internal::redux_impl - ::run(derived(), func); -#endif } /** \returns the minimum of all coefficients of \c *this. diff --git a/Eigen/src/Core/Ref.h b/Eigen/src/Core/Ref.h index 6390a8b64..09921c9e7 100644 --- a/Eigen/src/Core/Ref.h +++ b/Eigen/src/Core/Ref.h @@ -243,11 +243,7 @@ template class Ref< template void construct(const Expression& expr, internal::false_type) { -#ifdef EIGEN_TEST_EVALUATORS internal::call_assignment_no_alias(m_object,expr,internal::assign_op()); -#else - m_object.lazyAssign(expr); -#endif Base::construct(m_object); } diff --git a/Eigen/src/Core/Replicate.h b/Eigen/src/Core/Replicate.h index e63f0d421..3777049ee 100644 --- a/Eigen/src/Core/Replicate.h +++ b/Eigen/src/Core/Replicate.h @@ -54,13 +54,8 @@ struct traits > : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0 : (MatrixType::Flags & RowMajorBit) ? 1 : 0, -#ifndef EIGEN_TEST_EVALUATORS - Flags = (_MatrixTypeNested::Flags & HereditaryBits & ~RowMajorBit) | (IsRowMajor ? RowMajorBit : 0), - CoeffReadCost = _MatrixTypeNested::CoeffReadCost -#else // FIXME enable DirectAccess with negative strides? Flags = IsRowMajor ? RowMajorBit : 0 -#endif }; }; } diff --git a/Eigen/src/Core/ReturnByValue.h b/Eigen/src/Core/ReturnByValue.h index 9d53fba27..f4e12a93b 100644 --- a/Eigen/src/Core/ReturnByValue.h +++ b/Eigen/src/Core/ReturnByValue.h @@ -33,25 +33,18 @@ struct traits > }; }; -#ifndef EIGEN_TEST_EVALUATORS /* The ReturnByValue object doesn't even have a coeff() method. * So the only way that nesting it in an expression can work, is by evaluating it into a plain matrix. * So internal::nested always gives the plain return matrix type. * * FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ?? 
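A brief usage sketch (illustrative, not part of the patch) of the redux() entry point whose evaluator-based path is kept above. The custom functor and its name are assumptions; it only needs a binary operator() and, to stay portable across compilers of that era, a result_type typedef.

#include <Eigen/Dense>
#include <algorithm>
#include <cmath>
using namespace Eigen;

struct MaxAbs {
  typedef double result_type;
  double operator()(double a, double b) const { return std::max(std::abs(a), std::abs(b)); }
};

void redux_demo(const MatrixXd& m)   // assumes m is non-empty
{
  // The named reductions are thin wrappers around redux().
  double s = m.sum();
  double p = m.prod();

  // A custom reduction: largest absolute value of all coefficients.
  double maxAbs = m.redux(MaxAbs());
  (void)s; (void)p; (void)maxAbs;
}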
+ * Answer: EvalBeforeNestingBit should be deprecated since we have the evaluators */ template -struct nested, n, PlainObject> -{ - typedef typename traits::ReturnType type; -}; -#else -template struct nested_eval, n, PlainObject> { typedef typename traits::ReturnType type; }; -#endif } // end namespace internal @@ -93,7 +86,6 @@ Derived& DenseBase::operator=(const ReturnByValue& other) return derived(); } -#ifdef EIGEN_TEST_EVALUATORS namespace internal { // Expression is evaluated in a temporary; default implementation of Assignment is bypassed so that @@ -123,7 +115,6 @@ protected: }; } // end namespace internal -#endif } // end namespace Eigen diff --git a/Eigen/src/Core/Reverse.h b/Eigen/src/Core/Reverse.h index ceb6e4701..01de90800 100644 --- a/Eigen/src/Core/Reverse.h +++ b/Eigen/src/Core/Reverse.h @@ -44,17 +44,7 @@ struct traits > ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - -#ifndef EIGEN_TEST_EVALUATORS - // let's enable LinearAccess only with vectorization because of the product overhead - LinearAccess = ( (Direction==BothDirections) && (int(_MatrixTypeNested::Flags)&PacketAccessBit) ) - ? LinearAccessBit : 0, - - Flags = int(_MatrixTypeNested::Flags) & (HereditaryBits | LvalueBit | PacketAccessBit | LinearAccess), - CoeffReadCost = _MatrixTypeNested::CoeffReadCost -#else Flags = _MatrixTypeNested::Flags & (RowMajorBit | LvalueBit) -#endif }; }; diff --git a/Eigen/src/Core/Select.h b/Eigen/src/Core/Select.h index d4fd88e62..0cb85a4ad 100644 --- a/Eigen/src/Core/Select.h +++ b/Eigen/src/Core/Select.h @@ -43,14 +43,7 @@ struct traits > ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime, -#ifndef EIGEN_TEST_EVALUATORS - Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & HereditaryBits, - CoeffReadCost = traits::type>::CoeffReadCost - + EIGEN_SIZE_MAX(traits::type>::CoeffReadCost, - traits::type>::CoeffReadCost) -#else Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & RowMajorBit -#endif }; }; } diff --git a/Eigen/src/Core/SelfAdjointView.h b/Eigen/src/Core/SelfAdjointView.h index 546f61252..19cb232c9 100644 --- a/Eigen/src/Core/SelfAdjointView.h +++ b/Eigen/src/Core/SelfAdjointView.h @@ -40,20 +40,10 @@ struct traits > : traits Mode = UpLo | SelfAdjoint, Flags = MatrixTypeNestedCleaned::Flags & (HereditaryBits) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit)) // FIXME these flags should be preserved -#ifndef EIGEN_TEST_EVALUATORS - , - CoeffReadCost = MatrixTypeNestedCleaned::CoeffReadCost -#endif }; }; } -#ifndef EIGEN_TEST_EVALUATORS -template -struct SelfadjointProductMatrix; -#endif - // FIXME could also be called SelfAdjointWrapper to be consistent with DiagonalWrapper ?? 
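The Reverse and Select expressions whose flags are simplified above are used as follows; a small illustrative sketch with placeholder names.

#include <Eigen/Dense>
using namespace Eigen;

void reverse_select_demo()
{
  VectorXd v = VectorXd::LinSpaced(5, -2.0, 2.0);

  // Reverse: view the coefficients in the opposite order.
  VectorXd r = v.reverse();

  // Select: coefficient-wise "condition ? then : else".
  ArrayXd clamped = (v.array() > 0.0).select(v.array(), 0.0);
  (void)r; (void)clamped;
}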
template class SelfAdjointView : public TriangularBase > @@ -118,8 +108,6 @@ template class SelfAdjointView EIGEN_DEVICE_FUNC MatrixTypeNestedCleaned& nestedExpression() { return *const_cast(&m_matrix); } -#ifdef EIGEN_TEST_EVALUATORS - /** Efficient triangular matrix times vector/matrix product */ template EIGEN_DEVICE_FUNC @@ -145,31 +133,6 @@ template class SelfAdjointView return (s*mat.nestedExpression()).template selfadjointView(); } -#else // EIGEN_TEST_EVALUATORS - - /** Efficient self-adjoint matrix times vector/matrix product */ - template - EIGEN_DEVICE_FUNC - SelfadjointProductMatrix - operator*(const MatrixBase& rhs) const - { - return SelfadjointProductMatrix - - (m_matrix, rhs.derived()); - } - - /** Efficient vector/matrix times self-adjoint matrix product */ - template friend - EIGEN_DEVICE_FUNC - SelfadjointProductMatrix - operator*(const MatrixBase& lhs, const SelfAdjointView& rhs) - { - return SelfadjointProductMatrix - - (lhs.derived(),rhs.m_matrix); - } -#endif - /** Perform a symmetric rank 2 update of the selfadjoint matrix \c *this: * \f$ this = this + \alpha u v^* + conj(\alpha) v u^* \f$ * \returns a reference to \c *this @@ -231,104 +194,6 @@ template class SelfAdjointView namespace internal { -#ifndef EIGEN_TEST_EVALUATORS - -template -struct triangular_assignment_selector -{ - enum { - col = (UnrollCount-1) / Derived1::RowsAtCompileTime, - row = (UnrollCount-1) % Derived1::RowsAtCompileTime - }; - - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - triangular_assignment_selector::run(dst, src); - - if(row == col) - dst.coeffRef(row, col) = numext::real(src.coeff(row, col)); - else if(row < col) - dst.coeffRef(col, row) = numext::conj(dst.coeffRef(row, col) = src.coeff(row, col)); - } -}; - -template -struct triangular_assignment_selector -{ - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &, const Derived2 &) {} -}; - -template -struct triangular_assignment_selector -{ - enum { - col = (UnrollCount-1) / Derived1::RowsAtCompileTime, - row = (UnrollCount-1) % Derived1::RowsAtCompileTime - }; - - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - triangular_assignment_selector::run(dst, src); - - if(row == col) - dst.coeffRef(row, col) = numext::real(src.coeff(row, col)); - else if(row > col) - dst.coeffRef(col, row) = numext::conj(dst.coeffRef(row, col) = src.coeff(row, col)); - } -}; - -template -struct triangular_assignment_selector -{ - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &, const Derived2 &) {} -}; - -template -struct triangular_assignment_selector -{ - typedef typename Derived1::Index Index; - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - for(Index j = 0; j < dst.cols(); ++j) - { - for(Index i = 0; i < j; ++i) - { - dst.copyCoeff(i, j, src); - dst.coeffRef(j,i) = numext::conj(dst.coeff(i,j)); - } - dst.copyCoeff(j, j, src); - } - } -}; - -template -struct triangular_assignment_selector -{ - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - typedef typename Derived1::Index Index; - for(Index i = 0; i < dst.rows(); ++i) - { - for(Index j = 0; j < i; ++j) - { - dst.copyCoeff(i, j, src); - dst.coeffRef(j,i) = numext::conj(dst.coeff(i,j)); - } - dst.copyCoeff(i, i, src); - } - } -}; - -#endif // EIGEN_TEST_EVALUATORS - -#ifdef EIGEN_ENABLE_EVALUATORS - // TODO currently a selfadjoint expression has the form SelfAdjointView<.,.> // in the future selfadjoint-ness should be defined by the expression 
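A usage sketch of the selfadjoint products and rank updates handled by the SelfAdjointView operators above; illustrative only, with placeholder names, and assuming A is symmetric with its Lower part populated.

#include <Eigen/Dense>
using namespace Eigen;

void selfadjoint_demo(const MatrixXd& A, const VectorXd& x,
                      const VectorXd& u, const VectorXd& v, MatrixXd& B)
{
  // Efficient symmetric matrix * vector product, reading only the Lower part of A.
  VectorXd y(x.size());
  y.noalias() = A.selfadjointView<Lower>() * x;

  // Rank-1 and rank-2 updates of the selfadjoint part of B.
  B.selfadjointView<Lower>().rankUpdate(u, 2.0);      // B += 2 u u^T
  B.selfadjointView<Lower>().rankUpdate(u, v, 1.0);   // B += u v^T + v u^T
}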
traits // such that Transpose > is valid. (currently TriangularBase::transpose() is overloaded to make it work) @@ -382,8 +247,6 @@ public: { eigen_internal_assert(false && "should never be called"); } }; -#endif // EIGEN_ENABLE_EVALUATORS - } // end namespace internal /*************************************************************************** diff --git a/Eigen/src/Core/SelfCwiseBinaryOp.h b/Eigen/src/Core/SelfCwiseBinaryOp.h index bec6f4968..38185d9d7 100644 --- a/Eigen/src/Core/SelfCwiseBinaryOp.h +++ b/Eigen/src/Core/SelfCwiseBinaryOp.h @@ -12,178 +12,6 @@ namespace Eigen { -#ifndef EIGEN_TEST_EVALUATORS - -/** \class SelfCwiseBinaryOp - * \ingroup Core_Module - * - * \internal - * - * \brief Internal helper class for optimizing operators like +=, -= - * - * This is a pseudo expression class re-implementing the copyCoeff/copyPacket - * method to directly performs a +=/-= operations in an optimal way. In particular, - * this allows to make sure that the input/output data are loaded only once using - * aligned packet loads. - * - * \sa class SwapWrapper for a similar trick. - */ - -namespace internal { -template -struct traits > - : traits > -{ - enum { - // Note that it is still a good idea to preserve the DirectAccessBit - // so that assign can correctly align the data. - Flags = traits >::Flags | (Lhs::Flags&AlignedBit) | (Lhs::Flags&DirectAccessBit) | (Lhs::Flags&LvalueBit), - OuterStrideAtCompileTime = Lhs::OuterStrideAtCompileTime, - InnerStrideAtCompileTime = Lhs::InnerStrideAtCompileTime - }; -}; -} - -template class SelfCwiseBinaryOp - : public internal::dense_xpr_base< SelfCwiseBinaryOp >::type -{ - public: - - typedef typename internal::dense_xpr_base::type Base; - EIGEN_DENSE_PUBLIC_INTERFACE(SelfCwiseBinaryOp) - - typedef typename internal::packet_traits::type Packet; - - EIGEN_DEVICE_FUNC - inline SelfCwiseBinaryOp(Lhs& xpr, const BinaryOp& func = BinaryOp()) : m_matrix(xpr), m_functor(func) {} - - EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows(); } - EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols(); } - EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_matrix.outerStride(); } - EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_matrix.innerStride(); } - EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_matrix.data(); } - - // note that this function is needed by assign to correctly align loads/stores - // TODO make Assign use .data() - EIGEN_DEVICE_FUNC - inline Scalar& coeffRef(Index row, Index col) - { - EIGEN_STATIC_ASSERT_LVALUE(Lhs) - return m_matrix.const_cast_derived().coeffRef(row, col); - } - EIGEN_DEVICE_FUNC - inline const Scalar& coeffRef(Index row, Index col) const - { - return m_matrix.coeffRef(row, col); - } - - // note that this function is needed by assign to correctly align loads/stores - // TODO make Assign use .data() - EIGEN_DEVICE_FUNC - inline Scalar& coeffRef(Index index) - { - EIGEN_STATIC_ASSERT_LVALUE(Lhs) - return m_matrix.const_cast_derived().coeffRef(index); - } - EIGEN_DEVICE_FUNC - inline const Scalar& coeffRef(Index index) const - { - return m_matrix.const_cast_derived().coeffRef(index); - } - - template - EIGEN_DEVICE_FUNC - void copyCoeff(Index row, Index col, const DenseBase& other) - { - OtherDerived& _other = other.const_cast_derived(); - eigen_internal_assert(row >= 0 && row < rows() - && col >= 0 && col < cols()); - Scalar& tmp = m_matrix.coeffRef(row,col); - tmp = m_functor(tmp, _other.coeff(row,col)); - } - - template - EIGEN_DEVICE_FUNC - void 
copyCoeff(Index index, const DenseBase& other) - { - OtherDerived& _other = other.const_cast_derived(); - eigen_internal_assert(index >= 0 && index < m_matrix.size()); - Scalar& tmp = m_matrix.coeffRef(index); - tmp = m_functor(tmp, _other.coeff(index)); - } - - template - void copyPacket(Index row, Index col, const DenseBase& other) - { - OtherDerived& _other = other.const_cast_derived(); - eigen_internal_assert(row >= 0 && row < rows() - && col >= 0 && col < cols()); - m_matrix.template writePacket(row, col, - m_functor.packetOp(m_matrix.template packet(row, col),_other.template packet(row, col)) ); - } - - template - void copyPacket(Index index, const DenseBase& other) - { - OtherDerived& _other = other.const_cast_derived(); - eigen_internal_assert(index >= 0 && index < m_matrix.size()); - m_matrix.template writePacket(index, - m_functor.packetOp(m_matrix.template packet(index),_other.template packet(index)) ); - } - - // reimplement lazyAssign to handle complex *= real - // see CwiseBinaryOp ctor for details - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE SelfCwiseBinaryOp& lazyAssign(const DenseBase& rhs) - { - EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs,RhsDerived) - EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename RhsDerived::Scalar); - - #ifdef EIGEN_DEBUG_ASSIGN - internal::assign_traits::debug(); - #endif - eigen_assert(rows() == rhs.rows() && cols() == rhs.cols()); - internal::assign_impl::run(*this,rhs.derived()); - #ifndef EIGEN_NO_DEBUG - this->checkTransposeAliasing(rhs.derived()); - #endif - return *this; - } - - // overloaded to honor evaluation of special matrices - // maybe another solution would be to not use SelfCwiseBinaryOp - // at first... - EIGEN_DEVICE_FUNC - SelfCwiseBinaryOp& operator=(const Rhs& _rhs) - { - typename internal::nested::type rhs(_rhs); - return Base::operator=(rhs); - } - - EIGEN_DEVICE_FUNC - Lhs& expression() const - { - return m_matrix; - } - - EIGEN_DEVICE_FUNC - const BinaryOp& functor() const - { - return m_functor; - } - - protected: - Lhs& m_matrix; - const BinaryOp& m_functor; - - private: - SelfCwiseBinaryOp& operator=(const SelfCwiseBinaryOp&); -}; - -#endif // EIGEN_TEST_EVALUATORS - -#ifdef EIGEN_TEST_EVALUATORS template inline Derived& DenseBase::operator*=(const Scalar& other) { @@ -215,43 +43,6 @@ inline Derived& DenseBase::operator/=(const Scalar& other) internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::div_assign_op()); return derived(); } -#else -template -inline Derived& DenseBase::operator*=(const Scalar& other) -{ - typedef typename Derived::PlainObject PlainObject; - SelfCwiseBinaryOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived()); - tmp = PlainObject::Constant(rows(),cols(),other); - return derived(); -} - -template -inline Derived& ArrayBase::operator+=(const Scalar& other) -{ - typedef typename Derived::PlainObject PlainObject; - SelfCwiseBinaryOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived()); - tmp = PlainObject::Constant(rows(),cols(),other); - return derived(); -} - -template -inline Derived& ArrayBase::operator-=(const Scalar& other) -{ - typedef typename Derived::PlainObject PlainObject; - SelfCwiseBinaryOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived()); - tmp = PlainObject::Constant(rows(),cols(),other); - return derived(); -} - -template -inline Derived& DenseBase::operator/=(const Scalar& other) -{ - typedef typename Derived::PlainObject PlainObject; - SelfCwiseBinaryOp, Derived, 
typename PlainObject::ConstantReturnType> tmp(derived()); - tmp = PlainObject::Constant(rows(),cols(), other); - return derived(); -} -#endif } // end namespace Eigen diff --git a/Eigen/src/Core/Solve.h b/Eigen/src/Core/Solve.h index a1501c259..7b12be1e6 100644 --- a/Eigen/src/Core/Solve.h +++ b/Eigen/src/Core/Solve.h @@ -99,7 +99,6 @@ private: Scalar coeff(Index i) const; }; -#ifdef EIGEN_TEST_EVALUATORS // Generic API dispatcher template class SolveImpl : public internal::generic_xpr_base, MatrixXpr, StorageKind>::type @@ -107,7 +106,6 @@ class SolveImpl : public internal::generic_xpr_base public: typedef typename internal::generic_xpr_base, MatrixXpr, StorageKind>::type Base; }; -#endif namespace internal { diff --git a/Eigen/src/Core/Swap.h b/Eigen/src/Core/Swap.h index 9a1c5f4f8..3277cb5ba 100644 --- a/Eigen/src/Core/Swap.h +++ b/Eigen/src/Core/Swap.h @@ -12,135 +12,6 @@ namespace Eigen { -#ifndef EIGEN_TEST_EVALUATORS - -/** \class SwapWrapper - * \ingroup Core_Module - * - * \internal - * - * \brief Internal helper class for swapping two expressions - */ -namespace internal { -template -struct traits > : traits {}; -} - -template class SwapWrapper - : public internal::dense_xpr_base >::type -{ - public: - - typedef typename internal::dense_xpr_base::type Base; - EIGEN_DENSE_PUBLIC_INTERFACE(SwapWrapper) - typedef typename internal::packet_traits::type Packet; - - EIGEN_DEVICE_FUNC - inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {} - - EIGEN_DEVICE_FUNC - inline Index rows() const { return m_expression.rows(); } - EIGEN_DEVICE_FUNC - inline Index cols() const { return m_expression.cols(); } - EIGEN_DEVICE_FUNC - inline Index outerStride() const { return m_expression.outerStride(); } - EIGEN_DEVICE_FUNC - inline Index innerStride() const { return m_expression.innerStride(); } - - typedef typename internal::conditional< - internal::is_lvalue::value, - Scalar, - const Scalar - >::type ScalarWithConstIfNotLvalue; - - EIGEN_DEVICE_FUNC - inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } - EIGEN_DEVICE_FUNC - inline const Scalar* data() const { return m_expression.data(); } - - EIGEN_DEVICE_FUNC - inline Scalar& coeffRef(Index rowId, Index colId) - { - return m_expression.const_cast_derived().coeffRef(rowId, colId); - } - - EIGEN_DEVICE_FUNC - inline Scalar& coeffRef(Index index) - { - return m_expression.const_cast_derived().coeffRef(index); - } - - EIGEN_DEVICE_FUNC - inline Scalar& coeffRef(Index rowId, Index colId) const - { - return m_expression.coeffRef(rowId, colId); - } - - EIGEN_DEVICE_FUNC - inline Scalar& coeffRef(Index index) const - { - return m_expression.coeffRef(index); - } - - template - EIGEN_DEVICE_FUNC - void copyCoeff(Index rowId, Index colId, const DenseBase& other) - { - OtherDerived& _other = other.const_cast_derived(); - eigen_internal_assert(rowId >= 0 && rowId < rows() - && colId >= 0 && colId < cols()); - Scalar tmp = m_expression.coeff(rowId, colId); - m_expression.coeffRef(rowId, colId) = _other.coeff(rowId, colId); - _other.coeffRef(rowId, colId) = tmp; - } - - template - EIGEN_DEVICE_FUNC - void copyCoeff(Index index, const DenseBase& other) - { - OtherDerived& _other = other.const_cast_derived(); - eigen_internal_assert(index >= 0 && index < m_expression.size()); - Scalar tmp = m_expression.coeff(index); - m_expression.coeffRef(index) = _other.coeff(index); - _other.coeffRef(index) = tmp; - } - - template - void copyPacket(Index rowId, Index colId, const DenseBase& other) - { - OtherDerived& _other = 
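A minimal sketch (illustrative) of the operator*= / operator/= entry points rewritten above in terms of call_assignment with a constant expression; the scalar additions are Array-only operations.

#include <Eigen/Dense>
using namespace Eigen;

void scalar_inplace_demo()
{
  VectorXd v = VectorXd::Ones(4);
  v *= 3.0;   // every coefficient scaled in place
  v /= 2.0;

  ArrayXd a = ArrayXd::Zero(4);
  a += 1.5;   // adding a scalar coefficient-wise
  a -= 0.5;
}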
other.const_cast_derived(); - eigen_internal_assert(rowId >= 0 && rowId < rows() - && colId >= 0 && colId < cols()); - Packet tmp = m_expression.template packet(rowId, colId); - m_expression.template writePacket(rowId, colId, - _other.template packet(rowId, colId) - ); - _other.template writePacket(rowId, colId, tmp); - } - - template - void copyPacket(Index index, const DenseBase& other) - { - OtherDerived& _other = other.const_cast_derived(); - eigen_internal_assert(index >= 0 && index < m_expression.size()); - Packet tmp = m_expression.template packet(index); - m_expression.template writePacket(index, - _other.template packet(index) - ); - _other.template writePacket(index, tmp); - } - - EIGEN_DEVICE_FUNC - ExpressionType& expression() const { return m_expression; } - - protected: - ExpressionType& m_expression; -}; - -#endif - -#ifdef EIGEN_ENABLE_EVALUATORS - namespace internal { // Overload default assignPacket behavior for swapping them @@ -189,8 +60,6 @@ public: } // namespace internal -#endif - } // end namespace Eigen #endif // EIGEN_SWAP_H diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h index dd6180a8f..144bb2c01 100644 --- a/Eigen/src/Core/Transpose.h +++ b/Eigen/src/Core/Transpose.h @@ -42,18 +42,10 @@ struct traits > ColsAtCompileTime = MatrixType::RowsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime, -#ifndef EIGEN_TEST_EVALUATORS FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, Flags0 = MatrixTypeNestedPlain::Flags & ~(LvalueBit | NestByRefBit), Flags1 = Flags0 | FlagsLvalueBit, Flags = Flags1 ^ RowMajorBit, - CoeffReadCost = MatrixTypeNestedPlain::CoeffReadCost, -#else - FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, - Flags0 = MatrixTypeNestedPlain::Flags & ~(LvalueBit | NestByRefBit), - Flags1 = Flags0 | FlagsLvalueBit, - Flags = Flags1 ^ RowMajorBit, -#endif InnerStrideAtCompileTime = inner_stride_at_compile_time::ret, OuterStrideAtCompileTime = outer_stride_at_compile_time::ret }; @@ -109,7 +101,6 @@ struct TransposeImpl_base } // end namespace internal -#ifdef EIGEN_TEST_EVALUATORS // Generic API dispatcher template class TransposeImpl @@ -118,7 +109,6 @@ class TransposeImpl public: typedef typename internal::generic_xpr_base >::type Base; }; -#endif template class TransposeImpl : public internal::TransposeImpl_base::type @@ -141,59 +131,6 @@ template class TransposeImpl inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); } inline const Scalar* data() const { return derived().nestedExpression().data(); } - -#ifndef EIGEN_TEST_EVALUATORS - - EIGEN_DEVICE_FUNC - inline ScalarWithConstIfNotLvalue& coeffRef(Index rowId, Index colId) - { - EIGEN_STATIC_ASSERT_LVALUE(MatrixType) - return derived().nestedExpression().const_cast_derived().coeffRef(colId, rowId); - } - - EIGEN_DEVICE_FUNC - inline ScalarWithConstIfNotLvalue& coeffRef(Index index) - { - EIGEN_STATIC_ASSERT_LVALUE(MatrixType) - return derived().nestedExpression().const_cast_derived().coeffRef(index); - } - - EIGEN_DEVICE_FUNC - inline CoeffReturnType coeff(Index rowId, Index colId) const - { - return derived().nestedExpression().coeff(colId, rowId); - } - - EIGEN_DEVICE_FUNC - inline CoeffReturnType coeff(Index index) const - { - return derived().nestedExpression().coeff(index); - } - - template - inline const PacketScalar packet(Index rowId, Index colId) const - { - return derived().nestedExpression().template packet(colId, rowId); - } - - template - inline void 
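For reference, a small sketch of the swap operation that the swap_assign_op kernel above implements; names are placeholders.

#include <Eigen/Dense>
using namespace Eigen;

void swap_demo()
{
  MatrixXd a = MatrixXd::Constant(2, 2, 1.0);
  MatrixXd b = MatrixXd::Constant(2, 2, 2.0);

  // Coefficients are exchanged in place; no full temporary copy is created.
  a.swap(b);
}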
writePacket(Index rowId, Index colId, const PacketScalar& x) - { - derived().nestedExpression().const_cast_derived().template writePacket(colId, rowId, x); - } - - template - inline const PacketScalar packet(Index index) const - { - return derived().nestedExpression().template packet(index); - } - - template - inline void writePacket(Index index, const PacketScalar& x) - { - derived().nestedExpression().const_cast_derived().template writePacket(index, x); - } -#endif // FIXME: shall we keep the const version of coeffRef? EIGEN_DEVICE_FUNC @@ -446,14 +383,6 @@ void check_for_aliasing(const Dst &dst, const Src &src) } // end namespace internal -#ifndef EIGEN_TEST_EVALUATORS -template -template -void DenseBase::checkTransposeAliasing(const OtherDerived& other) const -{ - internal::checkTransposeAliasing_impl::run(derived(), other); -} -#endif // EIGEN_TEST_EVALUATORS #endif // EIGEN_NO_DEBUG } // end namespace Eigen diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h index cc585bc6c..0d315dd50 100644 --- a/Eigen/src/Core/TriangularMatrix.h +++ b/Eigen/src/Core/TriangularMatrix.h @@ -32,9 +32,6 @@ template class TriangularBase : public EigenBase enum { Mode = internal::traits::Mode, -#ifndef EIGEN_TEST_EVALUATORS - CoeffReadCost = internal::traits::CoeffReadCost, -#endif RowsAtCompileTime = internal::traits::RowsAtCompileTime, ColsAtCompileTime = internal::traits::ColsAtCompileTime, MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, @@ -177,21 +174,10 @@ struct traits > : traits enum { Mode = _Mode, Flags = (MatrixTypeNestedCleaned::Flags & (HereditaryBits | LvalueBit) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) -#ifndef EIGEN_TEST_EVALUATORS - , - CoeffReadCost = MatrixTypeNestedCleaned::CoeffReadCost -#endif }; }; } -#ifndef EIGEN_TEST_EVALUATORS -template -struct TriangularProduct; -#endif - template class TriangularViewImpl; template class TriangularView @@ -270,7 +256,6 @@ template class TriangularView return m_matrix.transpose(); } -#ifdef EIGEN_TEST_EVALUATORS template EIGEN_DEVICE_FUNC inline const Solve @@ -287,7 +272,6 @@ template class TriangularView #else using Base::solve; #endif -#endif // EIGEN_TEST_EVALUATORS EIGEN_DEVICE_FUNC const SelfAdjointView selfadjointView() const @@ -348,8 +332,6 @@ template class TriangularViewImpl<_Mat EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().nestedExpression().innerStride(); } -#ifdef EIGEN_TEST_EVALUATORS - /** \sa MatrixBase::operator+=() */ template EIGEN_DEVICE_FUNC @@ -365,16 +347,6 @@ template class TriangularViewImpl<_Mat return derived(); } -#else - /** \sa MatrixBase::operator+=() */ - template - EIGEN_DEVICE_FUNC - TriangularViewType& operator+=(const DenseBase& other) { return *this = derived().nestedExpression() + other.derived(); } - /** \sa MatrixBase::operator-=() */ - template - EIGEN_DEVICE_FUNC - TriangularViewType& operator-=(const DenseBase& other) { return *this = derived().nestedExpression() - other.derived(); } -#endif /** \sa MatrixBase::operator*=() */ EIGEN_DEVICE_FUNC TriangularViewType& operator*=(const typename internal::traits::Scalar& other) { return *this = derived().nestedExpression() * other; } @@ -437,8 +409,6 @@ template class TriangularViewImpl<_Mat EIGEN_DEVICE_FUNC void lazyAssign(const MatrixBase& other); -#ifdef EIGEN_TEST_EVALUATORS - /** Efficient triangular matrix times vector/matrix product */ template EIGEN_DEVICE_FUNC @@ -456,30 +426,6 @@ template class TriangularViewImpl<_Mat { return 
Product(lhs.derived(),rhs.derived()); } - -#else // EIGEN_TEST_EVALUATORS - /** Efficient triangular matrix times vector/matrix product */ - template - EIGEN_DEVICE_FUNC - TriangularProduct - operator*(const MatrixBase& rhs) const - { - return TriangularProduct - - (derived().nestedExpression(), rhs.derived()); - } - - /** Efficient vector/matrix times triangular matrix product */ - template friend - EIGEN_DEVICE_FUNC - TriangularProduct - operator*(const MatrixBase& lhs, const TriangularViewImpl& rhs) - { - return TriangularProduct - - (lhs.derived(),rhs.derived().nestedExpression()); - } -#endif template EIGEN_DEVICE_FUNC @@ -490,14 +436,6 @@ template class TriangularViewImpl<_Mat EIGEN_DEVICE_FUNC void solveInPlace(const MatrixBase& other) const; -#ifndef EIGEN_TEST_EVALUATORS - template - EIGEN_DEVICE_FUNC - inline const internal::triangular_solve_retval - solve(const MatrixBase& other) const - { return solve(other); } -#endif // EIGEN_TEST_EVALUATORS - template EIGEN_DEVICE_FUNC void solveInPlace(const MatrixBase& other) const @@ -507,11 +445,7 @@ template class TriangularViewImpl<_Mat EIGEN_DEVICE_FUNC void swap(TriangularBase const & other) { - #ifdef EIGEN_TEST_EVALUATORS call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op()); - #else - TriangularView,Mode>(const_cast(derived().nestedExpression())).lazyAssign(other.const_cast_derived().nestedExpression()); - #endif } // TODO: this overload is ambiguous and it should be deprecated (Gael) @@ -519,65 +453,8 @@ template class TriangularViewImpl<_Mat EIGEN_DEVICE_FUNC void swap(MatrixBase const & other) { - #ifdef EIGEN_TEST_EVALUATORS call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op()); - #else - SwapWrapper swaper(const_cast(derived().nestedExpression())); - TriangularView,Mode>(swaper).lazyAssign(other.derived()); - #endif - } - -#ifndef EIGEN_TEST_EVALUATORS - - // TODO simplify the following: - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE TriangularViewType& operator=(const ProductBase& other) - { - setZero(); - return assignProduct(other,1); - } - - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE TriangularViewType& operator+=(const ProductBase& other) - { - return assignProduct(other,1); } - - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE TriangularViewType& operator-=(const ProductBase& other) - { - return assignProduct(other,-1); - } - - - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE TriangularViewType& operator=(const ScaledProduct& other) - { - setZero(); - return assignProduct(other,other.alpha()); - } - - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE TriangularViewType& operator+=(const ScaledProduct& other) - { - return assignProduct(other,other.alpha()); - } - - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE TriangularViewType& operator-=(const ScaledProduct& other) - { - return assignProduct(other,-other.alpha()); - } - -#endif // EIGEN_TEST_EVALUATORS - -#ifdef EIGEN_TEST_EVALUATORS template EIGEN_DEVICE_FUNC @@ -590,194 +467,12 @@ template class TriangularViewImpl<_Mat template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TriangularViewType& _assignProduct(const ProductType& prod, const Scalar& alpha); - - protected: -#else - protected: - template - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE TriangularViewType& assignProduct(const ProductBase& prod, const Scalar& alpha); -#endif }; /*************************************************************************** * Implementation of triangular evaluation/assignment 
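An illustrative sketch of the triangular products and solves that the TriangularViewImpl operators above expose; the matrix names are placeholders.

#include <Eigen/Dense>
using namespace Eigen;

void triangular_demo(const MatrixXd& A, const VectorXd& b)
{
  // Product with the lower triangular part of A only.
  VectorXd y = A.triangularView<Lower>() * b;

  // Solve A_upper * x = b by back substitution; solveInPlace avoids the copy.
  VectorXd x = A.triangularView<Upper>().solve(b);

  VectorXd x2 = b;
  A.triangularView<Upper>().solveInPlace(x2);
  (void)y;
}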
***************************************************************************/ -namespace internal { - -#ifndef EIGEN_TEST_EVALUATORS - -template -struct triangular_assignment_selector -{ - enum { - col = (UnrollCount-1) / Derived1::RowsAtCompileTime, - row = (UnrollCount-1) % Derived1::RowsAtCompileTime - }; - - typedef typename Derived1::Scalar Scalar; - - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - triangular_assignment_selector::run(dst, src); - - eigen_assert( Mode == Upper || Mode == Lower - || Mode == StrictlyUpper || Mode == StrictlyLower - || Mode == UnitUpper || Mode == UnitLower); - if((Mode == Upper && row <= col) - || (Mode == Lower && row >= col) - || (Mode == StrictlyUpper && row < col) - || (Mode == StrictlyLower && row > col) - || (Mode == UnitUpper && row < col) - || (Mode == UnitLower && row > col)) - dst.copyCoeff(row, col, src); - else if(ClearOpposite) - { - if (Mode&UnitDiag && row==col) - dst.coeffRef(row, col) = Scalar(1); - else - dst.coeffRef(row, col) = Scalar(0); - } - } -}; - -// prevent buggy user code from causing an infinite recursion -template -struct triangular_assignment_selector -{ - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &, const Derived2 &) {} -}; - -template -struct triangular_assignment_selector -{ - typedef typename Derived1::Index Index; - typedef typename Derived1::Scalar Scalar; - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - for(Index j = 0; j < dst.cols(); ++j) - { - Index maxi = (std::min)(j, dst.rows()-1); - for(Index i = 0; i <= maxi; ++i) - dst.copyCoeff(i, j, src); - if (ClearOpposite) - for(Index i = maxi+1; i < dst.rows(); ++i) - dst.coeffRef(i, j) = Scalar(0); - } - } -}; - -template -struct triangular_assignment_selector -{ - typedef typename Derived1::Index Index; - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - for(Index j = 0; j < dst.cols(); ++j) - { - for(Index i = j; i < dst.rows(); ++i) - dst.copyCoeff(i, j, src); - Index maxi = (std::min)(j, dst.rows()); - if (ClearOpposite) - for(Index i = 0; i < maxi; ++i) - dst.coeffRef(i, j) = static_cast(0); - } - } -}; - -template -struct triangular_assignment_selector -{ - typedef typename Derived1::Index Index; - typedef typename Derived1::Scalar Scalar; - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - for(Index j = 0; j < dst.cols(); ++j) - { - Index maxi = (std::min)(j, dst.rows()); - for(Index i = 0; i < maxi; ++i) - dst.copyCoeff(i, j, src); - if (ClearOpposite) - for(Index i = maxi; i < dst.rows(); ++i) - dst.coeffRef(i, j) = Scalar(0); - } - } -}; - -template -struct triangular_assignment_selector -{ - typedef typename Derived1::Index Index; - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - for(Index j = 0; j < dst.cols(); ++j) - { - for(Index i = j+1; i < dst.rows(); ++i) - dst.copyCoeff(i, j, src); - Index maxi = (std::min)(j, dst.rows()-1); - if (ClearOpposite) - for(Index i = 0; i <= maxi; ++i) - dst.coeffRef(i, j) = static_cast(0); - } - } -}; - -template -struct triangular_assignment_selector -{ - typedef typename Derived1::Index Index; - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - for(Index j = 0; j < dst.cols(); ++j) - { - Index maxi = (std::min)(j, dst.rows()); - for(Index i = 0; i < maxi; ++i) - dst.copyCoeff(i, j, src); - if (ClearOpposite) - { - for(Index i = maxi+1; i < dst.rows(); ++i) - dst.coeffRef(i, j) = 0; - } - } - 
dst.diagonal().setOnes(); - } -}; -template -struct triangular_assignment_selector -{ - typedef typename Derived1::Index Index; - EIGEN_DEVICE_FUNC - static inline void run(Derived1 &dst, const Derived2 &src) - { - for(Index j = 0; j < dst.cols(); ++j) - { - Index maxi = (std::min)(j, dst.rows()); - for(Index i = maxi+1; i < dst.rows(); ++i) - dst.copyCoeff(i, j, src); - if (ClearOpposite) - { - for(Index i = 0; i < maxi; ++i) - dst.coeffRef(i, j) = 0; - } - } - dst.diagonal().setOnes(); - } -}; - -#endif // EIGEN_TEST_EVALUATORS - -} // end namespace internal - -#ifdef EIGEN_TEST_EVALUATORS - // FIXME should we keep that possibility template template @@ -816,84 +511,6 @@ void TriangularViewImpl::lazyAssign(const TriangularBas internal::call_assignment(derived().noalias(), other.derived()); } -#else - -// FIXME should we keep that possibility -template -template -inline TriangularView& -TriangularViewImpl::operator=(const MatrixBase& other) -{ - if(OtherDerived::Flags & EvalBeforeAssigningBit) - { - typename internal::plain_matrix_type::type other_evaluated(other.rows(), other.cols()); - other_evaluated.template triangularView().lazyAssign(other.derived()); - lazyAssign(other_evaluated); - } - else - lazyAssign(other.derived()); - return derived(); -} - -// FIXME should we keep that possibility -template -template -void TriangularViewImpl::lazyAssign(const MatrixBase& other) -{ - enum { - unroll = MatrixType::SizeAtCompileTime != Dynamic - && internal::traits::CoeffReadCost != Dynamic - && MatrixType::SizeAtCompileTime*internal::traits::CoeffReadCost/2 <= EIGEN_UNROLLING_LIMIT - }; - eigen_assert(derived().rows() == other.rows() && derived().cols() == other.cols()); - - internal::triangular_assignment_selector - ::run(derived().nestedExpression().const_cast_derived(), other.derived()); -} - - - -template -template -inline TriangularView& -TriangularViewImpl::operator=(const TriangularBase& other) -{ - eigen_assert(Mode == int(OtherDerived::Mode)); - if(internal::traits::Flags & EvalBeforeAssigningBit) - { - typename OtherDerived::DenseMatrixType other_evaluated(other.rows(), other.cols()); - other_evaluated.template triangularView().lazyAssign(other.derived().nestedExpression()); - lazyAssign(other_evaluated); - } - else - lazyAssign(other.derived().nestedExpression()); - return derived(); -} - -template -template -void TriangularViewImpl::lazyAssign(const TriangularBase& other) -{ - enum { - unroll = MatrixType::SizeAtCompileTime != Dynamic - && internal::traits::CoeffReadCost != Dynamic - && MatrixType::SizeAtCompileTime * internal::traits::CoeffReadCost / 2 - <= EIGEN_UNROLLING_LIMIT - }; - eigen_assert(derived().rows() == other.rows() && derived().cols() == other.cols()); - - internal::triangular_assignment_selector - ::run(derived().nestedExpression().const_cast_derived(), other.derived().nestedExpression()); -} - -#endif // EIGEN_TEST_EVALUATORS - /*************************************************************************** * Implementation of TriangularBase methods ***************************************************************************/ @@ -914,31 +531,6 @@ void TriangularBase::evalTo(MatrixBase &other) const evalToLazy(other.derived()); } -#ifndef EIGEN_TEST_EVALUATORS - -/** Assigns a triangular or selfadjoint matrix to a dense matrix. - * If the matrix is triangular, the opposite part is set to zero. 
*/ -template -template -void TriangularBase::evalToLazy(MatrixBase &other) const -{ - enum { - unroll = DenseDerived::SizeAtCompileTime != Dynamic - && internal::traits::CoeffReadCost != Dynamic - && DenseDerived::SizeAtCompileTime * internal::traits::CoeffReadCost / 2 - <= EIGEN_UNROLLING_LIMIT - }; - other.derived().resize(this->rows(), this->cols()); - - internal::triangular_assignment_selector - ::MatrixTypeNestedCleaned, Derived::Mode, - unroll ? int(DenseDerived::SizeAtCompileTime) : Dynamic, - true // clear the opposite triangular part - >::run(other.derived(), derived().nestedExpression()); -} - -#endif // EIGEN_TEST_EVALUATORS - /*************************************************************************** * Implementation of TriangularView methods ***************************************************************************/ @@ -1028,8 +620,6 @@ bool MatrixBase::isLowerTriangular(const RealScalar& prec) const } -#ifdef EIGEN_ENABLE_EVALUATORS - /*************************************************************************** **************************************************************************** * Evaluators and Assignment of triangular expressions @@ -1268,7 +858,6 @@ struct triangular_assignment_loop } // end namespace internal -#ifdef EIGEN_TEST_EVALUATORS /** Assigns a triangular or selfadjoint matrix to a dense matrix. * If the matrix is triangular, the opposite part is set to zero. */ template @@ -1315,11 +904,7 @@ struct Assignment, internal::sub_ass } }; - } // end namespace internal -#endif - -#endif // EIGEN_ENABLE_EVALUATORS } // end namespace Eigen diff --git a/Eigen/src/Core/VectorwiseOp.h b/Eigen/src/Core/VectorwiseOp.h index 1a9eead43..a8130e902 100644 --- a/Eigen/src/Core/VectorwiseOp.h +++ b/Eigen/src/Core/VectorwiseOp.h @@ -48,25 +48,9 @@ struct traits > ColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime, -#ifndef EIGEN_TEST_EVALUATORS - Flags0 = (unsigned int)_MatrixTypeNested::Flags & HereditaryBits, - Flags = (Flags0 & ~RowMajorBit) | (RowsAtCompileTime == 1 ? RowMajorBit : 0), -#else Flags = RowsAtCompileTime == 1 ? RowMajorBit : 0, -#endif TraversalSize = Direction==Vertical ? MatrixType::RowsAtCompileTime : MatrixType::ColsAtCompileTime }; -#ifndef EIGEN_TEST_EVALUATORS - #if EIGEN_GNUC_AT_LEAST(3,4) - typedef typename MemberOp::template Cost CostOpType; - #else - typedef typename MemberOp::template Cost CostOpType; - #endif - enum { - CoeffReadCost = TraversalSize==Dynamic ? Dynamic - : TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value) - }; -#endif }; } diff --git a/Eigen/src/Core/Visitor.h b/Eigen/src/Core/Visitor.h index 76d452d9a..810ec28e7 100644 --- a/Eigen/src/Core/Visitor.h +++ b/Eigen/src/Core/Visitor.h @@ -53,7 +53,6 @@ struct visitor_impl } }; -#ifdef EIGEN_ENABLE_EVALUATORS // evaluator adaptor template class visitor_evaluator @@ -81,7 +80,6 @@ protected: typename internal::evaluator::nestedType m_evaluator; const XprType &m_xpr; }; -#endif } // end namespace internal /** Applies the visitor \a visitor to the whole coefficients of the matrix or vector. 
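A usage sketch (illustrative, placeholder names) of the partial reductions implemented by the VectorwiseOp expression whose traits are simplified above.

#include <Eigen/Dense>
using namespace Eigen;

void vectorwise_demo(const MatrixXd& m)
{
  RowVectorXd colSums  = m.colwise().sum();    // one value per column
  VectorXd    rowNorms = m.rowwise().norm();   // one value per row
  double maxColSum     = colSums.maxCoeff();   // reduce the partial result further
  (void)rowNorms; (void)maxColSum;
}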
@@ -105,7 +103,6 @@ template template void DenseBase::visit(Visitor& visitor) const { -#ifdef EIGEN_TEST_EVALUATORS typedef typename internal::visitor_evaluator ThisEvaluator; ThisEvaluator thisEval(derived()); @@ -117,16 +114,6 @@ void DenseBase::visit(Visitor& visitor) const return internal::visitor_impl::run(thisEval, visitor); -#else - enum { unroll = SizeAtCompileTime != Dynamic - && CoeffReadCost != Dynamic - && (SizeAtCompileTime == 1 || internal::functor_traits::Cost != Dynamic) - && SizeAtCompileTime * CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits::Cost - <= EIGEN_UNROLLING_LIMIT }; - return internal::visitor_impl::run(derived(), visitor); -#endif } namespace internal { diff --git a/Eigen/src/Core/products/CoeffBasedProduct.h b/Eigen/src/Core/products/CoeffBasedProduct.h deleted file mode 100644 index 76806fd62..000000000 --- a/Eigen/src/Core/products/CoeffBasedProduct.h +++ /dev/null @@ -1,460 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2006-2008 Benoit Jacob -// Copyright (C) 2008-2010 Gael Guennebaud -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_COEFFBASED_PRODUCT_H -#define EIGEN_COEFFBASED_PRODUCT_H - -namespace Eigen { - -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -/********************************************************************************* -* Coefficient based product implementation. -* It is designed for the following use cases: -* - small fixed sizes -* - lazy products -*********************************************************************************/ - -/* Since the all the dimensions of the product are small, here we can rely - * on the generic Assign mechanism to evaluate the product per coeff (or packet). - * - * Note that here the inner-loops should always be unrolled. - */ - -template -struct product_coeff_impl; - -template -struct product_packet_impl; - -template -struct traits > -{ - typedef MatrixXpr XprKind; - typedef typename remove_all::type _LhsNested; - typedef typename remove_all::type _RhsNested; - typedef typename scalar_product_traits::ReturnType Scalar; - typedef typename product_promote_storage_type::StorageKind, - typename traits<_RhsNested>::StorageKind, - 0>::ret StorageKind; - typedef typename promote_index_type::Index, - typename traits<_RhsNested>::Index>::type Index; - - enum { - LhsFlags = traits<_LhsNested>::Flags, - RhsFlags = traits<_RhsNested>::Flags, - - RowsAtCompileTime = _LhsNested::RowsAtCompileTime, - ColsAtCompileTime = _RhsNested::ColsAtCompileTime, - InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime), - - MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime, - MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime, - - LhsRowMajor = LhsFlags & RowMajorBit, - RhsRowMajor = RhsFlags & RowMajorBit, - - SameType = is_same::value, - - CanVectorizeRhs = RhsRowMajor && (RhsFlags & PacketAccessBit) - && (ColsAtCompileTime == Dynamic - || ( (ColsAtCompileTime % packet_traits::size) == 0 - && (RhsFlags&AlignedBit) - ) - ), - - CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit) - && (RowsAtCompileTime == Dynamic - || ( (RowsAtCompileTime % packet_traits::size) == 0 - && (LhsFlags&AlignedBit) - ) - ), - - EvalToRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 
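A sketch of the visitor interface that DenseBase::visit() above iterates with. The visitor type below is hypothetical; it only has to provide init() for the first coefficient and operator() for the remaining ones.

#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

// Tracks the largest coefficient and its position.
struct MaxVisitor {
  double value;
  MatrixXd::Index row, col;
  void init(const double& v, MatrixXd::Index i, MatrixXd::Index j)
  { value = v; row = i; col = j; }
  void operator()(const double& v, MatrixXd::Index i, MatrixXd::Index j)
  { if (v > value) { value = v; row = i; col = j; } }
};

void visit_demo(const MatrixXd& m)   // assumes m is non-empty
{
  MaxVisitor vis;
  m.visit(vis);
  std::cout << "max " << vis.value << " at (" << vis.row << "," << vis.col << ")\n";
}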
1 - : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 - : (RhsRowMajor && !CanVectorizeLhs), - - Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & ~RowMajorBit) - | (EvalToRowMajor ? RowMajorBit : 0) - | NestingFlags - | (CanVectorizeLhs ? (LhsFlags & AlignedBit) : 0) - | (CanVectorizeRhs ? (RhsFlags & AlignedBit) : 0) - // TODO enable vectorization for mixed types - | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0), -#ifndef EIGEN_TEST_EVALUATORS - LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost, - RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost, - CoeffReadCost = (InnerSize == Dynamic || LhsCoeffReadCost==Dynamic || RhsCoeffReadCost==Dynamic || NumTraits::AddCost==Dynamic || NumTraits::MulCost==Dynamic) ? Dynamic - : InnerSize * (NumTraits::MulCost + LhsCoeffReadCost + RhsCoeffReadCost) - + (InnerSize - 1) * NumTraits::AddCost, -#endif - /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside - * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner - * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect - * the Flags, it is safe to make this value depend on ActualPacketAccessBit, that doesn't affect the ABI. - */ - CanVectorizeInner = SameType - && LhsRowMajor - && (!RhsRowMajor) - && (LhsFlags & RhsFlags & ActualPacketAccessBit) - && (LhsFlags & RhsFlags & AlignedBit) - && (InnerSize % packet_traits::size == 0) - }; -}; - -} // end namespace internal - -#ifndef EIGEN_TEST_EVALUATORS - -template -class CoeffBasedProduct - : internal::no_assignment_operator, - public MatrixBase > -{ - public: - - typedef MatrixBase Base; - EIGEN_DENSE_PUBLIC_INTERFACE(CoeffBasedProduct) - typedef typename Base::PlainObject PlainObject; - - private: - - typedef typename internal::traits::_LhsNested _LhsNested; - typedef typename internal::traits::_RhsNested _RhsNested; - - enum { - PacketSize = internal::packet_traits::size, - InnerSize = internal::traits::InnerSize, - Unroll = CoeffReadCost != Dynamic && CoeffReadCost <= EIGEN_UNROLLING_LIMIT, - CanVectorizeInner = internal::traits::CanVectorizeInner - }; - - typedef internal::product_coeff_impl ScalarCoeffImpl; - - typedef CoeffBasedProduct LazyCoeffBasedProductType; - - public: - - EIGEN_DEVICE_FUNC - inline CoeffBasedProduct(const CoeffBasedProduct& other) - : Base(), m_lhs(other.m_lhs), m_rhs(other.m_rhs) - {} - - template - EIGEN_DEVICE_FUNC - inline CoeffBasedProduct(const Lhs& lhs, const Rhs& rhs) - : m_lhs(lhs), m_rhs(rhs) - { - // we don't allow taking products of matrices of different real types, as that wouldn't be vectorizable. - // We still allow to mix T and complex. - EIGEN_STATIC_ASSERT((internal::scalar_product_traits::Defined), - YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - eigen_assert(lhs.cols() == rhs.rows() - && "invalid matrix product" - && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); - } - - EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } - EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); } - - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const - { - Scalar res; - ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res); - return res; - } - - /* Allow index-based non-packet access. 
It is impossible though to allow index-based packed access, - * which is why we don't set the LinearAccessBit. - */ - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE const Scalar coeff(Index index) const - { - Scalar res; - const Index row = RowsAtCompileTime == 1 ? 0 : index; - const Index col = RowsAtCompileTime == 1 ? index : 0; - ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res); - return res; - } - - template - EIGEN_STRONG_INLINE const PacketScalar packet(Index row, Index col) const - { - PacketScalar res; - internal::product_packet_impl - ::run(row, col, m_lhs, m_rhs, res); - return res; - } - - // Implicit conversion to the nested type (trigger the evaluation of the product) - EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE operator const PlainObject& () const - { - m_result.lazyAssign(*this); - return m_result; - } - - EIGEN_DEVICE_FUNC const _LhsNested& lhs() const { return m_lhs; } - EIGEN_DEVICE_FUNC const _RhsNested& rhs() const { return m_rhs; } - - EIGEN_DEVICE_FUNC - const Diagonal diagonal() const - { return reinterpret_cast(*this); } - - template - EIGEN_DEVICE_FUNC - const Diagonal diagonal() const - { return reinterpret_cast(*this); } - - EIGEN_DEVICE_FUNC - const Diagonal diagonal(Index index) const - { return reinterpret_cast(*this).diagonal(index); } - - protected: - typename internal::add_const_on_value_type::type m_lhs; - typename internal::add_const_on_value_type::type m_rhs; - - mutable PlainObject m_result; -}; - -namespace internal { - -// here we need to overload the nested rule for products -// such that the nested type is a const reference to a plain matrix -template -struct nested, N, PlainObject> -{ - typedef PlainObject const& type; -}; - -/*************************************************************************** -* Normal product .coeff() implementation (with meta-unrolling) -***************************************************************************/ - -/************************************** -*** Scalar path - no vectorization *** -**************************************/ - -template -struct product_coeff_impl -{ - typedef typename Lhs::Index Index; - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) - { - product_coeff_impl::run(row, col, lhs, rhs, res); - res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col); - } -}; - -template -struct product_coeff_impl -{ - typedef typename Lhs::Index Index; - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) - { - res = lhs.coeff(row, 0) * rhs.coeff(0, col); - } -}; - -template -struct product_coeff_impl -{ - typedef typename Lhs::Index Index; - EIGEN_DEVICE_FUNC - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res) - { - eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix"); - res = lhs.coeff(row, 0) * rhs.coeff(0, col); - for(Index i = 1; i < lhs.cols(); ++i) - res += lhs.coeff(row, i) * rhs.coeff(i, col); - } -}; - -/******************************************* -*** Scalar path with inner vectorization *** -*******************************************/ - -template -struct product_coeff_vectorized_unroller -{ - typedef typename Lhs::Index Index; - enum { PacketSize = packet_traits::size }; - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) - { - product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); - pres = 
padd(pres, pmul( lhs.template packet(row, UnrollingIndex) , rhs.template packet(UnrollingIndex, col) )); - } -}; - -template -struct product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet> -{ - typedef typename Lhs::Index Index; - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) - { - pres = pmul(lhs.template packet(row, 0) , rhs.template packet(0, col)); - } -}; - -template -struct product_coeff_impl -{ - typedef typename Lhs::PacketScalar Packet; - typedef typename Lhs::Index Index; - enum { PacketSize = packet_traits::size }; - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) - { - Packet pres; - product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); - res = predux(pres); - } -}; - -template -struct product_coeff_vectorized_dyn_selector -{ - typedef typename Lhs::Index Index; - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) - { - res = lhs.row(row).transpose().cwiseProduct(rhs.col(col)).sum(); - } -}; - -// NOTE the 3 following specializations are because taking .col(0) on a vector is a bit slower -// NOTE maybe they are now useless since we have a specialization for Block -template -struct product_coeff_vectorized_dyn_selector -{ - typedef typename Lhs::Index Index; - static EIGEN_STRONG_INLINE void run(Index /*row*/, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) - { - res = lhs.transpose().cwiseProduct(rhs.col(col)).sum(); - } -}; - -template -struct product_coeff_vectorized_dyn_selector -{ - typedef typename Lhs::Index Index; - static EIGEN_STRONG_INLINE void run(Index row, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) - { - res = lhs.row(row).transpose().cwiseProduct(rhs).sum(); - } -}; - -template -struct product_coeff_vectorized_dyn_selector -{ - typedef typename Lhs::Index Index; - static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) - { - res = lhs.transpose().cwiseProduct(rhs).sum(); - } -}; - -template -struct product_coeff_impl -{ - typedef typename Lhs::Index Index; - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) - { - product_coeff_vectorized_dyn_selector::run(row, col, lhs, rhs, res); - } -}; - -/******************* -*** Packet path *** -*******************/ - -template -struct product_packet_impl -{ - typedef typename Lhs::Index Index; - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) - { - product_packet_impl::run(row, col, lhs, rhs, res); - res = pmadd(pset1(lhs.coeff(row, UnrollingIndex)), rhs.template packet(UnrollingIndex, col), res); - } -}; - -template -struct product_packet_impl -{ - typedef typename Lhs::Index Index; - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) - { - product_packet_impl::run(row, col, lhs, rhs, res); - res = pmadd(lhs.template packet(row, UnrollingIndex), pset1(rhs.coeff(UnrollingIndex, col)), res); - } -}; - -template -struct product_packet_impl -{ - typedef typename Lhs::Index Index; - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) - { - res = pmul(pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); - } -}; - -template -struct product_packet_impl -{ - typedef typename 
Lhs::Index Index; - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) - { - res = pmul(lhs.template packet(row, 0), pset1(rhs.coeff(0, col))); - } -}; - -template -struct product_packet_impl -{ - typedef typename Lhs::Index Index; - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res) - { - eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix"); - res = pmul(pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); - for(Index i = 1; i < lhs.cols(); ++i) - res = pmadd(pset1(lhs.coeff(row, i)), rhs.template packet(i, col), res); - } -}; - -template -struct product_packet_impl -{ - typedef typename Lhs::Index Index; - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res) - { - eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix"); - res = pmul(lhs.template packet(row, 0), pset1(rhs.coeff(0, col))); - for(Index i = 1; i < lhs.cols(); ++i) - res = pmadd(lhs.template packet(row, i), pset1(rhs.coeff(i, col)), res); - } -}; - -} // end namespace internal - -#endif // EIGEN_TEST_EVALUATORS - -#endif // -class GeneralProduct - : public ProductBase, Lhs, Rhs> -{ - enum { - MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime) - }; - public: - EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) - - typedef typename Lhs::Scalar LhsScalar; - typedef typename Rhs::Scalar RhsScalar; - typedef Scalar ResScalar; - - GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) - { - typedef internal::scalar_product_op BinOp; - EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar); - } - - template - inline void evalTo(Dest& dst) const - { - if((m_rhs.rows()+dst.rows()+dst.cols())<20 && m_rhs.rows()>0) - dst.noalias() = m_lhs .lazyProduct( m_rhs ); - else - { - dst.setZero(); - scaleAndAddTo(dst,Scalar(1)); - } - } - - template - inline void addTo(Dest& dst) const - { - if((m_rhs.rows()+dst.rows()+dst.cols())<20 && m_rhs.rows()>0) - dst.noalias() += m_lhs .lazyProduct( m_rhs ); - else - scaleAndAddTo(dst,Scalar(1)); - } - - template - inline void subTo(Dest& dst) const - { - if((m_rhs.rows()+dst.rows()+dst.cols())<20 && m_rhs.rows()>0) - dst.noalias() -= m_lhs .lazyProduct( m_rhs ); - else - scaleAndAddTo(dst,Scalar(-1)); - } - - template void scaleAndAddTo(Dest& dst, const Scalar& alpha) const - { - eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); - - typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(m_lhs); - typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(m_rhs); - - Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) - * RhsBlasTraits::extractScalarFactor(m_rhs); - - typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar, - Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType; - - typedef internal::gemm_functor< - Scalar, Index, - internal::general_matrix_matrix_product< - Index, - LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), - RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), - (Dest::Flags&RowMajorBit) ? 
RowMajor : ColMajor>, - _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor; - - BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), true); - - internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit); - } -}; -#endif // EIGEN_TEST_EVALUATORS - -#ifdef EIGEN_ENABLE_EVALUATORS namespace internal { template @@ -534,7 +453,6 @@ struct generic_product_impl }; } // end namespace internal -#endif // EIGEN_ENABLE_EVALUATORS } // end namespace Eigen diff --git a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h index 06c64714a..7db3e3d38 100644 --- a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +++ b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h @@ -261,7 +261,6 @@ struct general_product_to_triangular_selector } }; -#ifdef EIGEN_TEST_EVALUATORS template template TriangularView& TriangularViewImpl::_assignProduct(const ProductType& prod, const Scalar& alpha) @@ -272,19 +271,7 @@ TriangularView& TriangularViewImpl::_ass return derived(); } -#else -template -template -TriangularView& TriangularViewImpl::assignProduct(const ProductBase& prod, const Scalar& alpha) -{ - eigen_assert(derived().rows() == prod.rows() && derived().cols() == prod.cols()); - general_product_to_triangular_selector - ::run(derived().nestedExpression().const_cast_derived(), prod.derived(), alpha); - - return derived(); -} -#endif } // end namespace Eigen #endif // EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h index 0ab3f3a56..4e507b6cf 100644 --- a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h @@ -459,58 +459,6 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix -struct traits > - : traits, Lhs, Rhs> > -{}; -} - -template -struct SelfadjointProductMatrix - : public ProductBase, Lhs, Rhs > -{ - EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix) - - SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - - enum { - LhsIsUpper = (LhsMode&(Upper|Lower))==Upper, - LhsIsSelfAdjoint = (LhsMode&SelfAdjoint)==SelfAdjoint, - RhsIsUpper = (RhsMode&(Upper|Lower))==Upper, - RhsIsSelfAdjoint = (RhsMode&SelfAdjoint)==SelfAdjoint - }; - - template void scaleAndAddTo(Dest& dst, const Scalar& alpha) const - { - eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); - - typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(m_lhs); - typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(m_rhs); - - Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) - * RhsBlasTraits::extractScalarFactor(m_rhs); - - internal::product_selfadjoint_matrix::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint, - NumTraits::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)), - EIGEN_LOGICAL_XOR(RhsIsUpper, - internal::traits::Flags &RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint, - NumTraits::IsComplex && EIGEN_LOGICAL_XOR(RhsIsUpper,bool(RhsBlasTraits::NeedToConjugate)), - internal::traits::Flags&RowMajorBit ? 
RowMajor : ColMajor> - ::run( - lhs.rows(), rhs.cols(), // sizes - &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info - &rhs.coeffRef(0,0), rhs.outerStride(), // rhs info - &dst.coeffRef(0,0), dst.outerStride(), // result info - actualAlpha // alpha - ); - } -}; -#endif // EIGEN_TEST_EVALUATORS -#ifdef EIGEN_ENABLE_EVALUATORS namespace internal { template @@ -560,8 +508,6 @@ struct selfadjoint_product_impl } // end namespace internal -#endif // EIGEN_ENABLE_EVALUATORS - } // end namespace Eigen #endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_H diff --git a/Eigen/src/Core/products/SelfadjointMatrixVector.h b/Eigen/src/Core/products/SelfadjointMatrixVector.h index 020205c12..d9c041f0c 100644 --- a/Eigen/src/Core/products/SelfadjointMatrixVector.h +++ b/Eigen/src/Core/products/SelfadjointMatrixVector.h @@ -168,117 +168,6 @@ EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product -struct traits > - : traits, Lhs, Rhs> > -{}; -} - -template -struct SelfadjointProductMatrix - : public ProductBase, Lhs, Rhs > -{ - EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix) - - enum { - LhsUpLo = LhsMode&(Upper|Lower) - }; - - SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - - template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const - { - typedef typename Dest::Scalar ResScalar; - typedef typename Base::RhsScalar RhsScalar; - typedef Map, Aligned> MappedDest; - - eigen_assert(dest.rows()==m_lhs.rows() && dest.cols()==m_rhs.cols()); - - typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(m_lhs); - typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(m_rhs); - - Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) - * RhsBlasTraits::extractScalarFactor(m_rhs); - - enum { - EvalToDest = (Dest::InnerStrideAtCompileTime==1), - UseRhs = (_ActualRhsType::InnerStrideAtCompileTime==1) - }; - - internal::gemv_static_vector_if static_dest; - internal::gemv_static_vector_if static_rhs; - - ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(), - EvalToDest ? dest.data() : static_dest.data()); - - ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,rhs.size(), - UseRhs ? const_cast(rhs.data()) : static_rhs.data()); - - if(!EvalToDest) - { - #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN - Index size = dest.size(); - EIGEN_DENSE_STORAGE_CTOR_PLUGIN - #endif - MappedDest(actualDestPtr, dest.size()) = dest; - } - - if(!UseRhs) - { - #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN - Index size = rhs.size(); - EIGEN_DENSE_STORAGE_CTOR_PLUGIN - #endif - Map(actualRhsPtr, rhs.size()) = rhs; - } - - - internal::selfadjoint_matrix_vector_product::Flags&RowMajorBit) ? 
RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>::run - ( - lhs.rows(), // size - &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info - actualRhsPtr, 1, // rhs info - actualDestPtr, // result info - actualAlpha // scale factor - ); - - if(!EvalToDest) - dest = MappedDest(actualDestPtr, dest.size()); - } -}; - -namespace internal { -template -struct traits > - : traits, Lhs, Rhs> > -{}; -} - -template -struct SelfadjointProductMatrix - : public ProductBase, Lhs, Rhs > -{ - EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix) - - enum { - RhsUpLo = RhsMode&(Upper|Lower) - }; - - SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - - template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const - { - // let's simply transpose the product - Transpose destT(dest); - SelfadjointProductMatrix, int(RhsUpLo)==Upper ? Lower : Upper, false, - Transpose, 0, true>(m_rhs.transpose(), m_lhs.transpose()).scaleAndAddTo(destT, alpha); - } -}; - -#else // EIGEN_TEST_EVALUATORS - namespace internal { template @@ -378,8 +267,6 @@ struct selfadjoint_product_impl } // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - } // end namespace Eigen #endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H diff --git a/Eigen/src/Core/products/TriangularMatrixMatrix.h b/Eigen/src/Core/products/TriangularMatrixMatrix.h index fda6e2486..c2d0817ea 100644 --- a/Eigen/src/Core/products/TriangularMatrixMatrix.h +++ b/Eigen/src/Core/products/TriangularMatrixMatrix.h @@ -368,59 +368,9 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix -struct traits > - : traits, Lhs, Rhs> > -{}; -#endif } // end namespace internal -#ifndef EIGEN_TEST_EVALUATORS -template -struct TriangularProduct - : public ProductBase, Lhs, Rhs > -{ - EIGEN_PRODUCT_PUBLIC_INTERFACE(TriangularProduct) - - TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - - template void scaleAndAddTo(Dest& dst, const Scalar& alpha) const - { - typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(m_lhs); - typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(m_rhs); - - Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) - * RhsBlasTraits::extractScalarFactor(m_rhs); - - typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar, - Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,4> BlockingType; - - enum { IsLower = (Mode&Lower) == Lower }; - Index stripedRows = ((!LhsIsTriangular) || (IsLower)) ? lhs.rows() : (std::min)(lhs.rows(),lhs.cols()); - Index stripedCols = ((LhsIsTriangular) || (!IsLower)) ? rhs.cols() : (std::min)(rhs.cols(),rhs.rows()); - Index stripedDepth = LhsIsTriangular ? ((!IsLower) ? lhs.cols() : (std::min)(lhs.cols(),lhs.rows())) - : ((IsLower) ? rhs.rows() : (std::min)(rhs.rows(),rhs.cols())); - - BlockingType blocking(stripedRows, stripedCols, stripedDepth); - - internal::product_triangular_matrix_matrix::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate, - (internal::traits<_ActualRhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate, - (internal::traits::Flags&RowMajorBit) ? 
RowMajor : ColMajor> - ::run( - stripedRows, stripedCols, stripedDepth, // sizes - &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info - &rhs.coeffRef(0,0), rhs.outerStride(), // rhs info - &dst.coeffRef(0,0), dst.outerStride(), // result info - actualAlpha, blocking - ); - } -}; -#endif // EIGEN_TEST_EVALUATORS -#ifdef EIGEN_ENABLE_EVALUATORS namespace internal { template struct triangular_product_impl @@ -470,7 +420,6 @@ struct triangular_product_impl }; } // end namespace internal -#endif // EIGEN_ENABLE_EVALUATORS } // end namespace Eigen diff --git a/Eigen/src/Core/products/TriangularMatrixVector.h b/Eigen/src/Core/products/TriangularMatrixVector.h index 19167c232..92d64e384 100644 --- a/Eigen/src/Core/products/TriangularMatrixVector.h +++ b/Eigen/src/Core/products/TriangularMatrixVector.h @@ -157,61 +157,11 @@ EIGEN_DONT_INLINE void triangular_matrix_vector_product -struct traits > - : traits, Lhs, Rhs> > -{}; - -template -struct traits > - : traits, Lhs, Rhs> > -{}; -#endif - template struct trmv_selector; } // end namespace internal -#ifndef EIGEN_TEST_EVALUATORS -template -struct TriangularProduct - : public ProductBase, Lhs, Rhs > -{ - EIGEN_PRODUCT_PUBLIC_INTERFACE(TriangularProduct) - - TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - - template void scaleAndAddTo(Dest& dst, const Scalar& alpha) const - { - eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); - - internal::trmv_selector::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(m_lhs, m_rhs, dst, alpha); - } -}; - -template -struct TriangularProduct - : public ProductBase, Lhs, Rhs > -{ - EIGEN_PRODUCT_PUBLIC_INTERFACE(TriangularProduct) - - TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - - template void scaleAndAddTo(Dest& dst, const Scalar& alpha) const - { - eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); - - Transpose dstT(dst); - internal::trmv_selector<(Mode & (UnitDiag|ZeroDiag)) | ((Mode & Lower) ? Upper : Lower), - (int(internal::traits::Flags)&RowMajorBit) ? ColMajor : RowMajor> - ::run(m_rhs.transpose(),m_lhs.transpose(), dstT, alpha); - } -}; - -#else // EIGEN_TEST_EVALUATORS namespace internal { template @@ -240,7 +190,6 @@ struct triangular_product_impl }; } // end namespace internal -#endif // EIGEN_TEST_EVALUATORS namespace internal { diff --git a/Eigen/src/Core/util/ForwardDeclarations.h b/Eigen/src/Core/util/ForwardDeclarations.h index 99aa9b372..9ec57468b 100644 --- a/Eigen/src/Core/util/ForwardDeclarations.h +++ b/Eigen/src/Core/util/ForwardDeclarations.h @@ -139,10 +139,6 @@ template class ArrayWrapper; template class MatrixWrapper; namespace internal { -#ifndef EIGEN_TEST_EVALUATROS -template struct solve_retval_base; -template struct solve_retval; -#endif template struct kernel_retval_base; template struct kernel_retval; template struct image_retval_base; diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h index e8ac6bee7..f9b908e22 100644 --- a/Eigen/src/Core/util/Macros.h +++ b/Eigen/src/Core/util/Macros.h @@ -374,46 +374,6 @@ namespace Eigen { * documentation in a single line. **/ -#ifndef EIGEN_TEST_EVALUATORS - -#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \ - typedef typename Eigen::internal::traits::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex. */ \ - typedef typename Eigen::NumTraits::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex, T were corresponding to RealScalar. 
*/ \ - typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \ - typedef typename Eigen::internal::nested::type Nested; \ - typedef typename Eigen::internal::traits::StorageKind StorageKind; \ - typedef typename Eigen::internal::traits::Index Index; \ - enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ - ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ - Flags = Eigen::internal::traits::Flags, \ - CoeffReadCost = Eigen::internal::traits::CoeffReadCost, \ - SizeAtCompileTime = Base::SizeAtCompileTime, \ - MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \ - IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; - - -#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \ - typedef typename Eigen::internal::traits::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex. */ \ - typedef typename Eigen::NumTraits::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex, T were corresponding to RealScalar. */ \ - typedef typename Base::PacketScalar PacketScalar; \ - typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \ - typedef typename Eigen::internal::nested::type Nested; \ - typedef typename Eigen::internal::traits::StorageKind StorageKind; \ - typedef typename Eigen::internal::traits::Index Index; \ - enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ - ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ - MaxRowsAtCompileTime = Eigen::internal::traits::MaxRowsAtCompileTime, \ - MaxColsAtCompileTime = Eigen::internal::traits::MaxColsAtCompileTime, \ - Flags = Eigen::internal::traits::Flags, \ - CoeffReadCost = Eigen::internal::traits::CoeffReadCost, \ - SizeAtCompileTime = Base::SizeAtCompileTime, \ - MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \ - IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \ - using Base::derived; \ - using Base::const_cast_derived; - -#else - // TODO The EIGEN_DENSE_PUBLIC_INTERFACE should not exists anymore #define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \ @@ -450,8 +410,6 @@ namespace Eigen { using Base::derived; \ using Base::const_cast_derived; -#endif // EIGEN_TEST_EVALUATORS - #define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b) #define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b) diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h index bce009d16..f2536714e 100644 --- a/Eigen/src/Core/util/XprHelper.h +++ b/Eigen/src/Core/util/XprHelper.h @@ -125,43 +125,6 @@ template type; }; -#ifndef EIGEN_TEST_EVALUATORS -template -class compute_matrix_flags -{ - enum { - row_major_bit = Options&RowMajor ? 
RowMajorBit : 0, - is_dynamic_size_storage = MaxRows==Dynamic || MaxCols==Dynamic, - - aligned_bit = - ( - ((Options&DontAlign)==0) - && ( -#if EIGEN_ALIGN_STATICALLY - ((!is_dynamic_size_storage) && (((MaxCols*MaxRows*int(sizeof(Scalar))) % EIGEN_ALIGN_BYTES) == 0)) -#else - 0 -#endif - - || - -#if EIGEN_ALIGN - is_dynamic_size_storage -#else - 0 -#endif - - ) - ) ? AlignedBit : 0, - packet_access_bit = packet_traits::Vectorizable && aligned_bit ? PacketAccessBit : 0 - }; - - public: - enum { ret = LinearAccessBit | LvalueBit | DirectAccessBit | NestByRefBit | packet_access_bit | row_major_bit | aligned_bit }; -}; - -#else // EIGEN_TEST_EVALUATORS - template class compute_matrix_flags { @@ -172,9 +135,7 @@ class compute_matrix_flags // However, I (Gael) think that DirectAccessBit should only matter at the evaluation stage. enum { ret = DirectAccessBit | LvalueBit | NestByRefBit | row_major_bit }; }; -#endif -#ifdef EIGEN_ENABLE_EVALUATORS template class compute_matrix_evaluator_flags { @@ -209,8 +170,6 @@ class compute_matrix_evaluator_flags enum { ret = LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit | aligned_bit }; }; -#endif // EIGEN_ENABLE_EVALUATORS - template struct size_at_compile_time { enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols }; @@ -361,58 +320,6 @@ struct transfer_constness }; - -#ifndef EIGEN_TEST_EVALUATORS - -/** \internal Determines how a given expression should be nested into another one. - * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be - * nested into the bigger product expression. The choice is between nesting the expression b+c as-is, or - * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is - * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes - * many coefficient accesses in the nested expressions -- as is the case with matrix product for example. - * - * \param T the type of the expression being nested - * \param n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression. - * - * Note that if no evaluation occur, then the constness of T is preserved. - * - * Example. Suppose that a, b, and c are of type Matrix3d. The user forms the expression a*(b+c). - * b+c is an expression "sum of matrices", which we will denote by S. In order to determine how to nest it, - * the Product expression uses: nested::type, which turns out to be Matrix3d because the internal logic of - * nested determined that in this case it was better to evaluate the expression b+c into a temporary. On the other hand, - * since a is of type Matrix3d, the Product expression nests it as nested::type, which turns out to be - * const Matrix3d&, because the internal logic of nested determined that since a was already a matrix, there was no point - * in copying it into another matrix. - */ -template::type> struct nested -{ - enum { - // for the purpose of this test, to keep it reasonably simple, we arbitrarily choose a value of Dynamic values. - // the choice of 10000 makes it larger than any practical fixed value and even most dynamic values. - // in extreme cases where these assumptions would be wrong, we would still at worst suffer performance issues - // (poor choice of temporaries). - // it's important that this value can still be squared without integer overflowing. 
- DynamicAsInteger = 10000, - ScalarReadCost = NumTraits::Scalar>::ReadCost, - ScalarReadCostAsInteger = ScalarReadCost == Dynamic ? int(DynamicAsInteger) : int(ScalarReadCost), - CoeffReadCost = traits::CoeffReadCost, - CoeffReadCostAsInteger = CoeffReadCost == Dynamic ? int(DynamicAsInteger) : int(CoeffReadCost), - NAsInteger = n == Dynamic ? int(DynamicAsInteger) : n, - CostEvalAsInteger = (NAsInteger+1) * ScalarReadCostAsInteger + CoeffReadCostAsInteger, - CostNoEvalAsInteger = NAsInteger * CoeffReadCostAsInteger - }; - - typedef typename conditional< - ( (int(traits::Flags) & EvalBeforeNestingBit) || - int(CostEvalAsInteger) < int(CostNoEvalAsInteger) - ), - PlainObject, - typename ref_selector::type - >::type type; -}; - -#else - // When using evaluators, we never evaluate when assembling the expression!! // TODO: get rid of this nested class since it's just an alias for ref_selector. template struct nested @@ -420,12 +327,20 @@ template struct nested typedef typename ref_selector::type type; }; -#endif // EIGEN_TEST_EVALUATORS - -#ifdef EIGEN_ENABLE_EVALUATORS // However, we still need a mechanism to detect whether an expression which is evaluated multiple time // has to be evaluated into a temporary. -// That's the purpose of this new nested_eval helper: +// That's the purpose of this new nested_eval helper: +/** \internal Determines how a given expression should be nested when evaluated multiple times. + * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be + * evaluated into the bigger product expression. The choice is between nesting the expression b+c as-is, or + * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is + * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes + * many coefficient accesses in the nested expressions -- as is the case with matrix product for example. + * + * \param T the type of the expression being nested. + * \param n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression. + * \param PlainObject the type of the temporary if needed. + */ template::type> struct nested_eval { enum { @@ -453,7 +368,6 @@ template::type> struc typename ref_selector::type >::type type; }; -#endif template EIGEN_DEVICE_FUNC diff --git a/Eigen/src/Geometry/AlignedBox.h b/Eigen/src/Geometry/AlignedBox.h index 1d1daaa61..d6c5c1293 100644 --- a/Eigen/src/Geometry/AlignedBox.h +++ b/Eigen/src/Geometry/AlignedBox.h @@ -71,11 +71,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) template inline explicit AlignedBox(const MatrixBase& a_p) { -#ifndef EIGEN_TEST_EVALUATORS - typename internal::nested::type p(a_p.derived()); -#else typename internal::nested_eval::type p(a_p.derived()); -#endif m_min = p; m_max = p; } diff --git a/Eigen/src/Geometry/Homogeneous.h b/Eigen/src/Geometry/Homogeneous.h index 43314b04c..d1881d84d 100644 --- a/Eigen/src/Geometry/Homogeneous.h +++ b/Eigen/src/Geometry/Homogeneous.h @@ -49,9 +49,6 @@ struct traits > Flags = ColsAtCompileTime==1 ? (TmpFlags & ~RowMajorBit) : RowsAtCompileTime==1 ? 
(TmpFlags | RowMajorBit) : TmpFlags -#ifndef EIGEN_TEST_EVALUATORS - , CoeffReadCost = _MatrixTypeNested::CoeffReadCost -#endif // EIGEN_TEST_EVALUATORS }; }; @@ -80,39 +77,6 @@ template class Homogeneous const NestedExpression& nestedExpression() const { return m_matrix; } -#ifndef EIGEN_TEST_EVALUATORS - inline Scalar coeff(Index row, Index col) const - { - if( (int(Direction)==Vertical && row==m_matrix.rows()) - || (int(Direction)==Horizontal && col==m_matrix.cols())) - return 1; - return m_matrix.coeff(row, col); - } - - template - inline const internal::homogeneous_right_product_impl - operator* (const MatrixBase& rhs) const - { - eigen_assert(int(Direction)==Horizontal); - return internal::homogeneous_right_product_impl(m_matrix,rhs.derived()); - } - - template friend - inline const internal::homogeneous_left_product_impl - operator* (const MatrixBase& lhs, const Homogeneous& rhs) - { - eigen_assert(int(Direction)==Vertical); - return internal::homogeneous_left_product_impl(lhs.derived(),rhs.m_matrix); - } - - template friend - inline const internal::homogeneous_left_product_impl > - operator* (const Transform& lhs, const Homogeneous& rhs) - { - eigen_assert(int(Direction)==Vertical); - return internal::homogeneous_left_product_impl >(lhs,rhs.m_matrix); - } -#else template inline const Product operator* (const MatrixBase& rhs) const @@ -136,7 +100,6 @@ template class Homogeneous eigen_assert(int(Direction)==Vertical); return Product, Homogeneous>(lhs,rhs); } -#endif template EIGEN_STRONG_INLINE typename internal::result_of::type @@ -338,8 +301,6 @@ struct homogeneous_right_product_impl,Rhs> typename Rhs::Nested m_rhs; }; -#ifdef EIGEN_TEST_EVALUATORS - template struct evaluator_traits > { @@ -427,8 +388,6 @@ struct generic_product_impl, Homogeneous::cross(const MatrixBase& other) const // Note that there is no need for an expression here since the compiler // optimize such a small temporary very well (even within a complex expression) -#ifndef EIGEN_TEST_EVALUATORS - typename internal::nested::type lhs(derived()); - typename internal::nested::type rhs(other.derived()); -#else typename internal::nested_eval::type lhs(derived()); typename internal::nested_eval::type rhs(other.derived()); -#endif return typename cross_product_return_type::type( numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)), numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)), @@ -81,13 +76,8 @@ MatrixBase::cross3(const MatrixBase& other) const EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,4) EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,4) -#ifndef EIGEN_TEST_EVALUATORS - typedef typename internal::nested::type DerivedNested; - typedef typename internal::nested::type OtherDerivedNested; -#else typedef typename internal::nested_eval::type DerivedNested; typedef typename internal::nested_eval::type OtherDerivedNested; -#endif DerivedNested lhs(derived()); OtherDerivedNested rhs(other.derived()); @@ -114,13 +104,8 @@ VectorwiseOp::cross(const MatrixBase& ot EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) -#ifndef EIGEN_TEST_EVALUATORS - typename internal::nested::type mat(_expression()); - typename internal::nested::type vec(other.derived()); -#else typename internal::nested_eval::type mat(_expression()); typename internal::nested_eval::type vec(other.derived()); -#endif CrossReturnType res(_expression().rows(),_expression().cols()); 
if(Direction==Vertical) diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h index 850940302..3f0067286 100644 --- a/Eigen/src/Geometry/Quaternion.h +++ b/Eigen/src/Geometry/Quaternion.h @@ -217,11 +217,7 @@ struct traits > typedef _Scalar Scalar; typedef Matrix<_Scalar,4,1,_Options> Coefficients; enum{ -#ifndef EIGEN_TEST_EVALUATORS - IsAligned = internal::traits::Flags & AlignedBit, -#else IsAligned = (internal::traits::EvaluatorFlags & AlignedBit) != 0, -#endif Flags = IsAligned ? (AlignedBit | LvalueBit) : LvalueBit }; }; diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h index bcf3e2723..89e9cc1a4 100644 --- a/Eigen/src/Geometry/Transform.h +++ b/Eigen/src/Geometry/Transform.h @@ -62,7 +62,6 @@ struct transform_construct_from_matrix; template struct transform_take_affine_part; -#ifdef EIGEN_TEST_EVALUATORS template struct traits > { @@ -78,7 +77,6 @@ struct traits > Flags = 0 }; }; -#endif } // end namespace internal @@ -374,10 +372,8 @@ public: inline QTransform toQTransform(void) const; #endif -#ifdef EIGEN_TEST_EVALUATORS Index rows() const { return int(Mode)==int(Projective) ? m_matrix.cols() : (m_matrix.cols()-1); } Index cols() const { return m_matrix.cols(); } -#endif /** shortcut for m_matrix(row,col); * \sa MatrixBase::operator(Index,Index) const */ diff --git a/Eigen/src/Householder/HouseholderSequence.h b/Eigen/src/Householder/HouseholderSequence.h index 0c6a09861..4ded2995f 100644 --- a/Eigen/src/Householder/HouseholderSequence.h +++ b/Eigen/src/Householder/HouseholderSequence.h @@ -73,8 +73,6 @@ struct traits > }; }; -#ifdef EIGEN_TEST_EVALUATORS - struct HouseholderSequenceShape {}; template @@ -83,7 +81,6 @@ struct evaluator_traits > { typedef HouseholderSequenceShape Shape; }; -#endif template struct hseq_side_dependent_impl diff --git a/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h index 92af28cc8..98b169868 100644 --- a/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +++ b/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h @@ -87,16 +87,6 @@ class DiagonalPreconditioner x = m_invdiag.array() * b.array() ; } -#ifndef EIGEN_TEST_EVALUATORS - template inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized."); - eigen_assert(m_invdiag.size()==b.rows() - && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } -#else template inline const Solve solve(const MatrixBase& b) const { @@ -105,31 +95,12 @@ class DiagonalPreconditioner && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b"); return Solve(*this, b.derived()); } -#endif protected: Vector m_invdiag; bool m_isInitialized; }; -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef DiagonalPreconditioner<_MatrixType> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -} -#endif // EIGEN_TEST_EVALUATORS /** \ingroup IterativeLinearSolvers_Module * \brief A naive preconditioner which approximates any matrix as the identity matrix diff --git a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h index b4d1d2a79..051940dc7 100644 --- a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h 
+++ b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h @@ -182,24 +182,6 @@ public: ~BiCGSTAB() {} -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A - * \a x0 as an initial solution. - * - * \sa compute() - */ - template - inline const internal::solve_retval_with_guess - solveWithGuess(const MatrixBase& b, const Guess& x0) const - { - eigen_assert(m_isInitialized && "BiCGSTAB is not initialized."); - eigen_assert(Base::rows()==b.rows() - && "BiCGSTAB::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval_with_guess - (*this, b.derived(), x0); - } -#endif - /** \internal */ template void _solve_with_guess_impl(const Rhs& b, Dest& x) const @@ -234,25 +216,6 @@ protected: }; -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef BiCGSTAB<_MatrixType, _Preconditioner> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -} // end namespace internal -#endif - } // end namespace Eigen #endif // EIGEN_BICGSTAB_H diff --git a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h index b0273aaaf..f72cf86a5 100644 --- a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +++ b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h @@ -192,24 +192,6 @@ public: ConjugateGradient(const MatrixType& A) : Base(A) {} ~ConjugateGradient() {} - -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A - * \a x0 as an initial solution. - * - * \sa compute() - */ - template - inline const internal::solve_retval_with_guess - solveWithGuess(const MatrixBase& b, const Guess& x0) const - { - eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); - eigen_assert(Base::rows()==b.rows() - && "ConjugateGradient::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval_with_guess - (*this, b.derived(), x0); - } -#endif /** \internal */ template @@ -245,25 +227,6 @@ protected: }; -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -} // end namespace internal -#endif - } // end namespace Eigen #endif // EIGEN_CONJUGATE_GRADIENT_H diff --git a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h index f337c5fb0..7adbbc489 100644 --- a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +++ b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h @@ -171,17 +171,6 @@ class IncompleteLUT : public SparseSolverBase > x = m_P * x; } -#ifndef EIGEN_TEST_EVALUATORS - template inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "IncompleteLUT is not initialized."); - eigen_assert(cols()==b.rows() - && "IncompleteLUT::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } -#endif - protected: /** keeps off-diagonal entries; drops diagonal entries */ @@ -451,25 +440,6 @@ void IncompleteLUT::factorize(const _MatrixType& amat) m_info = Success; } -#ifndef EIGEN_TEST_EVALUATORS -namespace 
internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef IncompleteLUT<_MatrixType> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -} // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - } // end namespace Eigen #endif // EIGEN_INCOMPLETE_LUT_H diff --git a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h index 26487dbb2..fd9285087 100644 --- a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +++ b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h @@ -162,38 +162,6 @@ public: return m_error; } -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ - template inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized."); - eigen_assert(rows()==b.rows() - && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(derived(), b.derived()); - } -#endif - -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::sparse_solve_retval - solve(const SparseMatrixBase& b) const - { - eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized."); - eigen_assert(rows()==b.rows() - && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b"); - return internal::sparse_solve_retval(*this, b.derived()); - } -#endif // EIGEN_TEST_EVALUATORS - -#ifdef EIGEN_TEST_EVALUATORS /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A * and \a x0 as an initial solution. * @@ -207,7 +175,6 @@ public: eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b"); return SolveWithGuess(derived(), b.derived(), x0); } -#endif // EIGEN_TEST_EVALUATORS /** \returns Success if the iterations converged, and NoConvergence otherwise. 
*/ ComputationInfo info() const @@ -216,25 +183,6 @@ public: return m_info; } -#ifndef EIGEN_TEST_EVALUATORS - /** \internal */ - template - void _solve_sparse(const Rhs& b, SparseMatrix &dest) const - { - eigen_assert(rows()==b.rows()); - - int rhsCols = b.cols(); - int size = b.rows(); - Eigen::Matrix tb(size); - Eigen::Matrix tx(size); - for(int k=0; k void _solve_impl(const Rhs& b, SparseMatrix &dest) const @@ -252,7 +200,6 @@ public: dest.col(k) = tx.sparseView(0); } } -#endif // EIGEN_TEST_EVALUATORS protected: void init() @@ -275,25 +222,6 @@ protected: mutable bool m_analysisIsOk, m_factorizationIsOk; }; -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct sparse_solve_retval, Rhs> - : sparse_solve_retval_base, Rhs> -{ - typedef IterativeSolverBase Dec; - EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec().derived()._solve_sparse(rhs(),dst); - } -}; - -} // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - } // end namespace Eigen #endif // EIGEN_ITERATIVE_SOLVER_BASE_H diff --git a/Eigen/src/LU/Determinant.h b/Eigen/src/LU/Determinant.h index 9726bd96a..d6a3c1e5a 100644 --- a/Eigen/src/LU/Determinant.h +++ b/Eigen/src/LU/Determinant.h @@ -92,11 +92,7 @@ template inline typename internal::traits::Scalar MatrixBase::determinant() const { eigen_assert(rows() == cols()); -#ifdef EIGEN_TEST_EVALUATORS typedef typename internal::nested_eval::type Nested; -#else - typedef typename internal::nested::type Nested; -#endif return internal::determinant_impl::type>::run(derived()); } diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h index daf99e305..fdf2e0642 100644 --- a/Eigen/src/LU/FullPivLU.h +++ b/Eigen/src/LU/FullPivLU.h @@ -220,7 +220,6 @@ template class FullPivLU * * \sa TriangularView::solve(), kernel(), inverse() */ -#ifdef EIGEN_TEST_EVALUATORS template inline const Solve solve(const MatrixBase& b) const @@ -228,15 +227,6 @@ template class FullPivLU eigen_assert(m_isInitialized && "LU is not initialized."); return Solve(*this, b.derived()); } -#else - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "LU is not initialized."); - return internal::solve_retval(*this, b.derived()); - } -#endif /** \returns the determinant of the matrix of which * *this is the LU decomposition. 
It has only linear complexity @@ -380,22 +370,12 @@ template class FullPivLU * * \sa MatrixBase::inverse() */ -#ifdef EIGEN_TEST_EVALUATORS inline const Inverse inverse() const { eigen_assert(m_isInitialized && "LU is not initialized."); eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the inverse of a non-square matrix!"); return Inverse(*this); } -#else - inline const internal::solve_retval inverse() const - { - eigen_assert(m_isInitialized && "LU is not initialized."); - eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the inverse of a non-square matrix!"); - return internal::solve_retval - (*this, MatrixType::Identity(m_lu.rows(), m_lu.cols())); - } -#endif MatrixType reconstructedMatrix() const; @@ -752,22 +732,8 @@ void FullPivLU<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const namespace internal { -#ifndef EIGEN_TEST_EVALUATORS -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - EIGEN_MAKE_SOLVE_HELPERS(FullPivLU<_MatrixType>,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(), dst); - } -}; -#endif /***** Implementation of inverse() *****************************************************/ -#ifdef EIGEN_TEST_EVALUATORS template struct Assignment >, internal::assign_op, Dense2Dense, Scalar> { @@ -778,7 +744,6 @@ struct Assignment >, internal::assign_ dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); } }; -#endif } // end namespace internal /******* MatrixBase methods *****************************************************************/ diff --git a/Eigen/src/LU/InverseImpl.h b/Eigen/src/LU/InverseImpl.h index e10fee48f..e5f270d19 100644 --- a/Eigen/src/LU/InverseImpl.h +++ b/Eigen/src/LU/InverseImpl.h @@ -43,12 +43,8 @@ struct compute_inverse static inline void run(const MatrixType& matrix, ResultType& result) { typedef typename MatrixType::Scalar Scalar; -#ifdef EIGEN_TEST_EVALUATORS typename internal::evaluator::type matrixEval(matrix); result.coeffRef(0,0) = Scalar(1) / matrixEval.coeff(0,0); -#else - result.coeffRef(0,0) = Scalar(1) / matrix.coeff(0,0); -#endif } }; @@ -285,46 +281,8 @@ struct compute_inverse_and_det_with_check *** MatrixBase methods *** *************************/ -#ifndef EIGEN_TEST_EVALUATORS -template -struct traits > -{ - typedef typename MatrixType::PlainObject ReturnType; -}; - -template -struct inverse_impl : public ReturnByValue > -{ - typedef typename MatrixType::Index Index; - typedef typename internal::eval::type MatrixTypeNested; - typedef typename remove_all::type MatrixTypeNestedCleaned; - MatrixTypeNested m_matrix; - - EIGEN_DEVICE_FUNC - inverse_impl(const MatrixType& matrix) - : m_matrix(matrix) - {} - - EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows(); } - EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols(); } - - template - EIGEN_DEVICE_FUNC - inline void evalTo(Dest& dst) const - { - const int Size = EIGEN_PLAIN_ENUM_MIN(MatrixType::ColsAtCompileTime,Dest::ColsAtCompileTime); - EIGEN_ONLY_USED_FOR_DEBUG(Size); - eigen_assert(( (Size<=1) || (Size>4) || (extract_data(m_matrix)!=extract_data(dst))) - && "Aliasing problem detected in inverse(), you need to do inverse().eval() here."); - - compute_inverse::run(m_matrix, dst); - } -}; -#endif } // end namespace internal -#ifdef EIGEN_TEST_EVALUATORS - namespace internal { // Specialization for "dense = dense_xpr.inverse()" @@ -352,8 +310,6 @@ struct Assignment, internal::assign_op, Den } // end namespace internal -#endif - /** \lu_module * * \returns the 
matrix inverse of this matrix. @@ -371,7 +327,6 @@ struct Assignment, internal::assign_op, Den * * \sa computeInverseAndDetWithCheck() */ -#ifdef EIGEN_TEST_EVALUATORS template inline const Inverse MatrixBase::inverse() const { @@ -379,15 +334,6 @@ inline const Inverse MatrixBase::inverse() const eigen_assert(rows() == cols()); return Inverse(derived()); } -#else -template -inline const internal::inverse_impl MatrixBase::inverse() const -{ - EIGEN_STATIC_ASSERT(!NumTraits::IsInteger,THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES) - eigen_assert(rows() == cols()); - return internal::inverse_impl(derived()); -} -#endif /** \lu_module * @@ -422,11 +368,7 @@ inline void MatrixBase::computeInverseAndDetWithCheck( // for larger sizes, evaluating has negligible cost and limits code size. typedef typename internal::conditional< RowsAtCompileTime == 2, -#ifndef EIGEN_TEST_EVALUATORS - typename internal::remove_all::type>::type, -#else typename internal::remove_all::type>::type, -#endif PlainObject >::type MatrixType; internal::compute_inverse_and_det_with_check::run diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h index 57076f3a4..a4d22ce5f 100644 --- a/Eigen/src/LU/PartialPivLU.h +++ b/Eigen/src/LU/PartialPivLU.h @@ -142,7 +142,6 @@ template class PartialPivLU * * \sa TriangularView::solve(), inverse(), computeInverse() */ -#ifdef EIGEN_TEST_EVALUATORS template inline const Solve solve(const MatrixBase& b) const @@ -150,15 +149,6 @@ template class PartialPivLU eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); return Solve(*this, b.derived()); } -#else - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); - return internal::solve_retval(*this, b.derived()); - } -#endif /** \returns the inverse of the matrix of which *this is the LU decomposition. * @@ -167,20 +157,11 @@ template class PartialPivLU * * \sa MatrixBase::inverse(), LU::inverse() */ -#ifdef EIGEN_TEST_EVALUATORS inline const Inverse inverse() const { eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); return Inverse(*this); } -#else - inline const internal::solve_retval inverse() const - { - eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); - return internal::solve_retval - (*this, MatrixType::Identity(m_lu.rows(), m_lu.cols())); - } -#endif /** \returns the determinant of the matrix of which * *this is the LU decomposition. 
It has only linear complexity @@ -490,22 +471,7 @@ MatrixType PartialPivLU::reconstructedMatrix() const namespace internal { -#ifndef EIGEN_TEST_EVALUATORS -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - EIGEN_MAKE_SOLVE_HELPERS(PartialPivLU<_MatrixType>,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(), dst); - } -}; -#endif - /***** Implementation of inverse() *****************************************************/ -#ifdef EIGEN_TEST_EVALUATORS template struct Assignment >, internal::assign_op, Dense2Dense, Scalar> { @@ -516,7 +482,6 @@ struct Assignment >, internal::assi dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); } }; -#endif } // end namespace internal /******** MatrixBase methods *******/ diff --git a/Eigen/src/PaStiXSupport/PaStiXSupport.h b/Eigen/src/PaStiXSupport/PaStiXSupport.h index 95d53c850..bb8e0d1a8 100644 --- a/Eigen/src/PaStiXSupport/PaStiXSupport.h +++ b/Eigen/src/PaStiXSupport/PaStiXSupport.h @@ -153,22 +153,6 @@ class PastixBase : public SparseSolverBase { clean(); } - -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "Pastix solver is not initialized."); - eigen_assert(rows()==b.rows() - && "PastixBase::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } -#endif template bool _solve_impl(const MatrixBase &b, MatrixBase &x) const; @@ -227,22 +211,6 @@ class PastixBase : public SparseSolverBase return m_info; } -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. 
- * - * \sa compute() - */ - template - inline const internal::sparse_solve_retval - solve(const SparseMatrixBase& b) const - { - eigen_assert(m_isInitialized && "Pastix LU, LLT or LDLT is not initialized."); - eigen_assert(rows()==b.rows() - && "PastixBase::solve(): invalid number of rows of the right hand side matrix b"); - return internal::sparse_solve_retval(*this, b.derived()); - } -#endif // EIGEN_TEST_EVALUATORS - protected: // Initialize the Pastix data structure, check the matrix @@ -693,38 +661,6 @@ class PastixLDLT : public PastixBase< PastixLDLT<_MatrixType, _UpLo> > } }; -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef PastixBase<_MatrixType> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -template -struct sparse_solve_retval, Rhs> - : sparse_solve_retval_base, Rhs> -{ - typedef PastixBase<_MatrixType> Dec; - EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - this->defaultEvalTo(dst); - } -}; - -} // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - } // end namespace Eigen #endif diff --git a/Eigen/src/PardisoSupport/PardisoSupport.h b/Eigen/src/PardisoSupport/PardisoSupport.h index e3b1c5bc0..e1b0e1818 100644 --- a/Eigen/src/PardisoSupport/PardisoSupport.h +++ b/Eigen/src/PardisoSupport/PardisoSupport.h @@ -172,36 +172,6 @@ class PardisoImpl : public SparseSolveBase Derived& factorize(const MatrixType& matrix); Derived& compute(const MatrixType& matrix); - -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "Pardiso solver is not initialized."); - eigen_assert(rows()==b.rows() - && "PardisoImpl::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } - - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. 
- * - * \sa compute() - */ - template - inline const internal::sparse_solve_retval - solve(const SparseMatrixBase& b) const - { - eigen_assert(m_isInitialized && "Pardiso solver is not initialized."); - eigen_assert(rows()==b.rows() - && "PardisoImpl::solve(): invalid number of rows of the right hand side matrix b"); - return internal::sparse_solve_retval(*this, b.derived()); - } -#endif template bool _solve_impl(const MatrixBase &b, MatrixBase& x) const; @@ -546,38 +516,6 @@ class PardisoLDLT : public PardisoImpl< PardisoLDLT > } }; -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef PardisoImpl<_Derived> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -template -struct sparse_solve_retval, Rhs> - : sparse_solve_retval_base, Rhs> -{ - typedef PardisoImpl Dec; - EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - this->defaultEvalTo(dst); - } -}; - -} // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - } // end namespace Eigen #endif // EIGEN_PARDISOSUPPORT_H diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h index 96904c65f..adf737276 100644 --- a/Eigen/src/QR/ColPivHouseholderQR.h +++ b/Eigen/src/QR/ColPivHouseholderQR.h @@ -147,7 +147,6 @@ template class ColPivHouseholderQR * Example: \include ColPivHouseholderQR_solve.cpp * Output: \verbinclude ColPivHouseholderQR_solve.out */ -#ifdef EIGEN_TEST_EVALUATORS template inline const Solve solve(const MatrixBase& b) const @@ -155,15 +154,6 @@ template class ColPivHouseholderQR eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return Solve(*this, b.derived()); } -#else - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); - return internal::solve_retval(*this, b.derived()); - } -#endif HouseholderSequenceType householderQ() const; HouseholderSequenceType matrixQ() const @@ -304,22 +294,11 @@ template class ColPivHouseholderQR * \note If this matrix is not invertible, the returned matrix has undefined coefficients. * Use isInvertible() to first determine whether this matrix is invertible. 
*/ -#ifdef EIGEN_TEST_EVALUATORS inline const Inverse inverse() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return Inverse(*this); } -#else - inline const - internal::solve_retval - inverse() const - { - eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); - return internal::solve_retval - (*this, MatrixType::Identity(m_qr.rows(), m_qr.cols())); - } -#endif inline Index rows() const { return m_qr.rows(); } inline Index cols() const { return m_qr.cols(); } @@ -582,21 +561,6 @@ void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType & namespace internal { -#ifndef EIGEN_TEST_EVALUATORS -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - EIGEN_MAKE_SOLVE_HELPERS(ColPivHouseholderQR<_MatrixType>,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(), dst); - } -}; -#endif - -#ifdef EIGEN_TEST_EVALUATORS template struct Assignment >, internal::assign_op, Dense2Dense, Scalar> { @@ -607,7 +571,6 @@ struct Assignment >, interna dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); } }; -#endif } // end namespace internal diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h index ea15da041..710c64a45 100644 --- a/Eigen/src/QR/FullPivHouseholderQR.h +++ b/Eigen/src/QR/FullPivHouseholderQR.h @@ -151,7 +151,6 @@ template class FullPivHouseholderQR * Example: \include FullPivHouseholderQR_solve.cpp * Output: \verbinclude FullPivHouseholderQR_solve.out */ -#ifdef EIGEN_TEST_EVALUATORS template inline const Solve solve(const MatrixBase& b) const @@ -159,15 +158,6 @@ template class FullPivHouseholderQR eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return Solve(*this, b.derived()); } -#else - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); - return internal::solve_retval(*this, b.derived()); - } -#endif /** \returns Expression object representing the matrix Q */ @@ -298,22 +288,11 @@ template class FullPivHouseholderQR * \note If this matrix is not invertible, the returned matrix has undefined coefficients. * Use isInvertible() to first determine whether this matrix is invertible. 
*/ -#ifdef EIGEN_TEST_EVALUATORS inline const Inverse inverse() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return Inverse(*this); } -#else - inline const - internal::solve_retval - inverse() const - { - eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); - return internal::solve_retval - (*this, MatrixType::Identity(m_qr.rows(), m_qr.cols())); - } -#endif inline Index rows() const { return m_qr.rows(); } inline Index cols() const { return m_qr.cols(); } @@ -556,21 +535,6 @@ void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType namespace internal { -#ifndef EIGEN_TEST_EVALUATORS -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - EIGEN_MAKE_SOLVE_HELPERS(FullPivHouseholderQR<_MatrixType>,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(), dst); - } -}; -#endif // EIGEN_TEST_EVALUATORS - -#ifdef EIGEN_TEST_EVALUATORS template struct Assignment >, internal::assign_op, Dense2Dense, Scalar> { @@ -581,7 +545,6 @@ struct Assignment >, intern dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); } }; -#endif /** \ingroup QR_Module * @@ -589,7 +552,6 @@ struct Assignment >, intern * * \tparam MatrixType type of underlying dense matrix */ -// #ifndef EIGEN_TEST_EVALUATORS template struct FullPivHouseholderQRMatrixQReturnType : public ReturnByValue > { @@ -645,12 +607,10 @@ protected: typename IntDiagSizeVectorType::Nested m_rowsTranspositions; }; -// #ifdef EIGEN_TEST_EVALUATORS // template // struct evaluator > // : public evaluator > > // {}; -// #endif } // end namespace internal diff --git a/Eigen/src/QR/HouseholderQR.h b/Eigen/src/QR/HouseholderQR.h index 8604fff6f..0b0c9d1bd 100644 --- a/Eigen/src/QR/HouseholderQR.h +++ b/Eigen/src/QR/HouseholderQR.h @@ -117,7 +117,6 @@ template class HouseholderQR * Example: \include HouseholderQR_solve.cpp * Output: \verbinclude HouseholderQR_solve.out */ -#ifdef EIGEN_TEST_EVALUATORS template inline const Solve solve(const MatrixBase& b) const @@ -125,15 +124,6 @@ template class HouseholderQR eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); return Solve(*this, b.derived()); } -#else - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); - return internal::solve_retval(*this, b.derived()); - } -#endif /** This method returns an expression of the unitary matrix Q as a sequence of Householder transformations. * @@ -351,24 +341,6 @@ void HouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) c } #endif -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - EIGEN_MAKE_SOLVE_HELPERS(HouseholderQR<_MatrixType>,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(), dst); - } -}; - -} // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - /** Performs the QR factorization of the given matrix \a matrix. The result of * the factorization is stored into \c *this, and a reference to \c *this * is returned. diff --git a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h index 483aaef07..bcdc981d7 100644 --- a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +++ b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h @@ -124,21 +124,6 @@ class SPQR : public SparseSolverBase > * Get the number of columns of the input matrix. 
*/ inline Index cols() const { return m_cR->ncol; } - -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::solve_retval solve(const MatrixBase& B) const - { - eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()"); - eigen_assert(this->rows()==B.rows() - && "SPQR::solve(): invalid number of rows of the right hand side matrix B"); - return internal::solve_retval(*this, B.derived()); - } -#endif // EIGEN_TEST_EVALUATORS template void _solve_impl(const MatrixBase &b, MatrixBase &dest) const @@ -292,24 +277,5 @@ struct SPQRMatrixQTransposeReturnType{ const SPQRType& m_spqr; }; -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef SPQR<_MatrixType> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -} // end namespace internal -#endif - }// End namespace Eigen #endif diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h index 60a5aabb6..27b732b80 100644 --- a/Eigen/src/SVD/SVDBase.h +++ b/Eigen/src/SVD/SVDBase.h @@ -200,7 +200,6 @@ public: * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving. * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. */ -#ifdef EIGEN_TEST_EVALUATORS template inline const Solve solve(const MatrixBase& b) const @@ -209,16 +208,6 @@ public: eigen_assert(computeU() && computeV() && "SVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); return Solve(derived(), b.derived()); } -#else - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "SVD is not initialized."); - eigen_assert(computeU() && computeV() && "SVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); - return internal::solve_retval(*this, b.derived()); - } -#endif #ifndef EIGEN_PARSED_BY_DOXYGEN template @@ -273,23 +262,6 @@ void SVDBase::_solve_impl(const RhsType &rhs, DstType &dst) const } #endif -namespace internal { -#ifndef EIGEN_TEST_EVALUATORS -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef SVDBase SVDType; - EIGEN_MAKE_SOLVE_HELPERS(SVDType,Rhs) - - template void evalTo(Dest& dst) const - { - dec().derived()._solve_impl(rhs(), dst); - } -}; -#endif -} // end namespace internal - template bool SVDBase::allocate(Index rows, Index cols, unsigned int computationOptions) { diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky.h b/Eigen/src/SparseCholesky/SimplicialCholesky.h index ac8cd29b0..3c8cef5db 100644 --- a/Eigen/src/SparseCholesky/SimplicialCholesky.h +++ b/Eigen/src/SparseCholesky/SimplicialCholesky.h @@ -84,36 +84,6 @@ class SimplicialCholeskyBase : public SparseSolverBase return m_info; } -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. 
- * - * \sa compute() - */ - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "Simplicial LLT or LDLT is not initialized."); - eigen_assert(rows()==b.rows() - && "SimplicialCholeskyBase::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } - - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::sparse_solve_retval - solve(const SparseMatrixBase& b) const - { - eigen_assert(m_isInitialized && "Simplicial LLT or LDLT is not initialized."); - eigen_assert(rows()==b.rows() - && "SimplicialCholesky::solve(): invalid number of rows of the right hand side matrix b"); - return internal::sparse_solve_retval(*this, b.derived()); - } -#endif // EIGEN_TEST_EVALUATORS - /** \returns the permutation P * \sa permutationPinv() */ const PermutationMatrix& permutationP() const @@ -157,11 +127,7 @@ class SimplicialCholeskyBase : public SparseSolverBase /** \internal */ template -#ifndef EIGEN_TEST_EVALUATORS - void _solve(const MatrixBase &b, MatrixBase &dest) const -#else void _solve_impl(const MatrixBase &b, MatrixBase &dest) const -#endif { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); eigen_assert(m_matrix.rows()==b.rows()); @@ -186,14 +152,12 @@ class SimplicialCholeskyBase : public SparseSolverBase if(m_P.size()>0) dest = m_Pinv * dest; } - -#ifdef EIGEN_TEST_EVALUATORS + template void _solve_impl(const SparseMatrixBase &b, SparseMatrixBase &dest) const { internal::solve_sparse_through_dense_panels(derived(), b, dest); } -#endif #endif // EIGEN_PARSED_BY_DOXYGEN @@ -578,11 +542,7 @@ public: /** \internal */ template -#ifndef EIGEN_TEST_EVALUATORS - void _solve(const MatrixBase &b, MatrixBase &dest) const -#else void _solve_impl(const MatrixBase &b, MatrixBase &dest) const -#endif { eigen_assert(Base::m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); eigen_assert(Base::m_matrix.rows()==b.rows()); @@ -618,14 +578,12 @@ public: dest = Base::m_Pinv * dest; } -#ifdef EIGEN_TEST_EVALUATORS /** \internal */ template void _solve_impl(const SparseMatrixBase &b, SparseMatrixBase &dest) const { internal::solve_sparse_through_dense_panels(*this, b, dest); } -#endif Scalar determinant() const { @@ -667,38 +625,6 @@ void SimplicialCholeskyBase::ordering(const MatrixType& a, CholMatrixTy ap.template selfadjointView() = a.template selfadjointView().twistedBy(m_P); } -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef SimplicialCholeskyBase Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec().derived()._solve(rhs(),dst); - } -}; - -template -struct sparse_solve_retval, Rhs> - : sparse_solve_retval_base, Rhs> -{ - typedef SimplicialCholeskyBase Dec; - EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - this->defaultEvalTo(dst); - } -}; - -} // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - } // end namespace Eigen #endif // EIGEN_SIMPLICIAL_CHOLESKY_H diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h index 398008597..19d9eaa42 100644 --- 
a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h @@ -39,10 +39,8 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs) Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); -#ifdef EIGEN_TEST_EVALUATORS typename evaluator::type lhsEval(lhs); typename evaluator::type rhsEval(rhs); -#endif res.setZero(); res.reserve(Index(estimated_nnz_prod)); @@ -52,19 +50,11 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r res.startVec(j); Index nnz = 0; -#ifndef EIGEN_TEST_EVALUATORS - for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt) -#else for (typename evaluator::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt) -#endif { Scalar y = rhsIt.value(); Index k = rhsIt.index(); -#ifndef EIGEN_TEST_EVALUATORS - for (typename Lhs::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt) -#else for (typename evaluator::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt) -#endif { Index i = lhsIt.index(); Scalar x = lhsIt.value(); diff --git a/Eigen/src/SparseCore/MappedSparseMatrix.h b/Eigen/src/SparseCore/MappedSparseMatrix.h index 9205b906f..d9aabd049 100644 --- a/Eigen/src/SparseCore/MappedSparseMatrix.h +++ b/Eigen/src/SparseCore/MappedSparseMatrix.h @@ -176,7 +176,6 @@ class MappedSparseMatrix::ReverseInnerIterator const Index m_end; }; -#ifdef EIGEN_ENABLE_EVALUATORS namespace internal { template @@ -202,7 +201,6 @@ struct evaluator > }; } -#endif } // end namespace Eigen diff --git a/Eigen/src/SparseCore/SparseAssign.h b/Eigen/src/SparseCore/SparseAssign.h index 36c9fe845..97c079d3f 100644 --- a/Eigen/src/SparseCore/SparseAssign.h +++ b/Eigen/src/SparseCore/SparseAssign.h @@ -12,117 +12,6 @@ namespace Eigen { -#ifndef EIGEN_TEST_EVALUATORS - -template -template -Derived& SparseMatrixBase::operator=(const EigenBase &other) -{ - other.derived().evalTo(derived()); - return derived(); -} - -template -template -Derived& SparseMatrixBase::operator=(const ReturnByValue& other) -{ - other.evalTo(derived()); - return derived(); -} - -template -template -inline Derived& SparseMatrixBase::operator=(const SparseMatrixBase& other) -{ - return assign(other.derived()); -} - -template -inline Derived& SparseMatrixBase::operator=(const Derived& other) -{ -// if (other.isRValue()) -// derived().swap(other.const_cast_derived()); -// else - return assign(other.derived()); -} - -template -template -inline Derived& SparseMatrixBase::assign(const OtherDerived& other) -{ - const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); - const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? 
Index(other.rows()) : Index(other.cols()); - if ((!transpose) && other.isRValue()) - { - // eval without temporary - derived().resize(Index(other.rows()), Index(other.cols())); - derived().setZero(); - derived().reserve((std::max)(this->rows(),this->cols())*2); - for (Index j=0; j -template -inline void SparseMatrixBase::assignGeneric(const OtherDerived& other) -{ - //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); - eigen_assert(( ((internal::traits::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) || - (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) && - "the transpose operation is supposed to be handled in SparseMatrix::operator="); - - enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) }; - - const Index outerSize = Index(other.outerSize()); - //typedef typename internal::conditional, Derived>::type TempType; - // thanks to shallow copies, we always eval to a tempary - Derived temp(Index(other.rows()), Index(other.cols())); - - temp.reserve((std::max)(this->rows(),this->cols())*2); - for (Index j=0; j -// inline Derived& operator=(const SparseSparseProduct& product); -// -// template -// Derived& operator+=(const SparseMatrixBase& other); -// template -// Derived& operator-=(const SparseMatrixBase& other); -// -// Derived& operator*=(const Scalar& other); -// Derived& operator/=(const Scalar& other); -// -// template -// Derived& operator*=(const SparseMatrixBase& other); - -#else // EIGEN_TEST_EVALUATORS - template template Derived& SparseMatrixBase::operator=(const EigenBase &other) @@ -298,8 +187,6 @@ struct Assignment, internal::assign_opindex(); } - inline Index col() const { return IsRowMajor ? this->index() : m_outer; } - protected: - Index m_outer; - }; - class ReverseInnerIterator: public XprType::ReverseInnerIterator - { - typedef typename BlockImpl::Index Index; - public: - inline ReverseInnerIterator(const BlockType& xpr, Index outer) - : XprType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) - {} - inline Index row() const { return IsRowMajor ? m_outer : this->index(); } - inline Index col() const { return IsRowMajor ? this->index() : m_outer; } - protected: - Index m_outer; - }; -#endif // EIGEN_TEST_EVALUATORS inline BlockImpl(const XprType& xpr, Index i) : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) @@ -66,14 +39,6 @@ public: Index nonZeros() const { -#ifndef EIGEN_TEST_EVALUATORS - Index nnz = 0; - Index end = m_outerStart + m_outerSize.value(); - for(Index j=m_outerStart; j::type EvaluatorType; EvaluatorType matEval(m_matrix); Index nnz = 0; @@ -82,7 +47,6 @@ public: for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it) ++nnz; return nnz; -#endif // EIGEN_TEST_EVALUATORS } inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } @@ -120,31 +84,6 @@ public: protected: enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; public: - -#ifndef EIGEN_TEST_EVALUATORS - class InnerIterator: public SparseMatrixType::InnerIterator - { - public: - inline InnerIterator(const BlockType& xpr, Index outer) - : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) - {} - inline Index row() const { return IsRowMajor ? m_outer : this->index(); } - inline Index col() const { return IsRowMajor ? 
this->index() : m_outer; } - protected: - Index m_outer; - }; - class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator - { - public: - inline ReverseInnerIterator(const BlockType& xpr, Index outer) - : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) - {} - inline Index row() const { return IsRowMajor ? m_outer : this->index(); } - inline Index col() const { return IsRowMajor ? this->index() : m_outer; } - protected: - Index m_outer; - }; -#endif // EIGEN_TEST_EVALUATORS inline sparse_matrix_block_impl(const SparseMatrixType& xpr, Index i) : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) @@ -434,34 +373,6 @@ public: m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } -#ifndef EIGEN_TEST_EVALUATORS - typedef internal::GenericSparseBlockInnerIteratorImpl InnerIterator; - - class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator - { - typedef typename _MatrixTypeNested::ReverseInnerIterator Base; - const BlockType& m_block; - Index m_begin; - public: - - EIGEN_STRONG_INLINE ReverseInnerIterator(const BlockType& block, Index outer) - : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())), - m_block(block), - m_begin(IsRowMajor ? block.m_startCol.value() : block.m_startRow.value()) - { - while( (Base::operator bool()) && (Base::index() >= (IsRowMajor ? m_block.m_startCol.value()+block.m_blockCols.value() : m_block.m_startRow.value()+block.m_blockRows.value())) ) - Base::operator--(); - } - - inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } - inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } - inline Index row() const { return Base::row() - m_block.m_startRow.value(); } - inline Index col() const { return Base::col() - m_block.m_startCol.value(); } - - inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; } - }; -#endif // EIGEN_TEST_EVALUATORS - inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } Index startRow() const { return m_startRow.value(); } Index startCol() const { return m_startCol.value(); } @@ -574,9 +485,6 @@ namespace internal { inline operator bool() const { return m_outerPos < m_end; } }; -#ifdef EIGEN_TEST_EVALUATORS - -// template struct unary_evaluator, IteratorBased > : public evaluator_base > @@ -690,9 +598,6 @@ public: inline operator bool() const { return m_outerPos < m_end; } }; - -#endif // EIGEN_TEST_EVALUATORS - } // end namespace internal diff --git a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h index 3a7e72cd2..5993c1caf 100644 --- a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h @@ -38,256 +38,6 @@ class sparse_cwise_binary_op_inner_iterator_selector; } // end namespace internal -#ifndef EIGEN_TEST_EVALUATORS - -template -class CwiseBinaryOpImpl - : public SparseMatrixBase > -{ - public: - class InnerIterator; - class ReverseInnerIterator; - typedef CwiseBinaryOp Derived; - EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) - CwiseBinaryOpImpl() - { - typedef typename internal::traits::StorageKind LhsStorageKind; - typedef typename internal::traits::StorageKind RhsStorageKind; - EIGEN_STATIC_ASSERT(( - (!internal::is_same::value) - || ((Lhs::Flags&RowMajorBit) == (Rhs::Flags&RowMajorBit))), - 
THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH); - } -}; - -template -class CwiseBinaryOpImpl::InnerIterator - : public internal::sparse_cwise_binary_op_inner_iterator_selector::InnerIterator> -{ - public: - typedef internal::sparse_cwise_binary_op_inner_iterator_selector< - BinaryOp,Lhs,Rhs, InnerIterator> Base; - - EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer) - : Base(binOp.derived(),outer) - {} -}; - -/*************************************************************************** -* Implementation of inner-iterators -***************************************************************************/ - -// template struct internal::func_is_conjunction { enum { ret = false }; }; -// template struct internal::func_is_conjunction > { enum { ret = true }; }; - -// TODO generalize the internal::scalar_product_op specialization to all conjunctions if any ! - -namespace internal { - -// sparse - sparse (generic) -template -class sparse_cwise_binary_op_inner_iterator_selector -{ - typedef CwiseBinaryOp CwiseBinaryXpr; - typedef typename traits::Scalar Scalar; - typedef typename traits::Index Index; - typedef typename traits::_LhsNested _LhsNested; - typedef typename traits::_RhsNested _RhsNested; - typedef typename _LhsNested::InnerIterator LhsIterator; - typedef typename _RhsNested::InnerIterator RhsIterator; - - public: - - EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) - : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()) - { - this->operator++(); - } - - EIGEN_STRONG_INLINE Derived& operator++() - { - if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index())) - { - m_id = m_lhsIter.index(); - m_value = m_functor(m_lhsIter.value(), m_rhsIter.value()); - ++m_lhsIter; - ++m_rhsIter; - } - else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index()))) - { - m_id = m_lhsIter.index(); - m_value = m_functor(m_lhsIter.value(), Scalar(0)); - ++m_lhsIter; - } - else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index()))) - { - m_id = m_rhsIter.index(); - m_value = m_functor(Scalar(0), m_rhsIter.value()); - ++m_rhsIter; - } - else - { - m_value = 0; // this is to avoid a compilation warning - m_id = -1; - } - return *static_cast(this); - } - - EIGEN_STRONG_INLINE Scalar value() const { return m_value; } - - EIGEN_STRONG_INLINE Index index() const { return m_id; } - EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); } - EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? 
index() : m_lhsIter.col(); } - - EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; } - - protected: - LhsIterator m_lhsIter; - RhsIterator m_rhsIter; - const BinaryOp& m_functor; - Scalar m_value; - Index m_id; -}; - -// sparse - sparse (product) -template -class sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Sparse, Sparse> -{ - typedef scalar_product_op BinaryFunc; - typedef CwiseBinaryOp CwiseBinaryXpr; - typedef typename CwiseBinaryXpr::Scalar Scalar; - typedef typename CwiseBinaryXpr::Index Index; - typedef typename traits::_LhsNested _LhsNested; - typedef typename _LhsNested::InnerIterator LhsIterator; - typedef typename traits::_RhsNested _RhsNested; - typedef typename _RhsNested::InnerIterator RhsIterator; - public: - - EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) - : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()) - { - while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index())) - { - if (m_lhsIter.index() < m_rhsIter.index()) - ++m_lhsIter; - else - ++m_rhsIter; - } - } - - EIGEN_STRONG_INLINE Derived& operator++() - { - ++m_lhsIter; - ++m_rhsIter; - while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index())) - { - if (m_lhsIter.index() < m_rhsIter.index()) - ++m_lhsIter; - else - ++m_rhsIter; - } - return *static_cast(this); - } - - EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); } - - EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } - EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } - EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } - - EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); } - - protected: - LhsIterator m_lhsIter; - RhsIterator m_rhsIter; - const BinaryFunc& m_functor; -}; - -// sparse - dense (product) -template -class sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Sparse, Dense> -{ - typedef scalar_product_op BinaryFunc; - typedef CwiseBinaryOp CwiseBinaryXpr; - typedef typename CwiseBinaryXpr::Scalar Scalar; - typedef typename CwiseBinaryXpr::Index Index; - typedef typename traits::_LhsNested _LhsNested; - typedef typename traits::RhsNested RhsNested; - typedef typename _LhsNested::InnerIterator LhsIterator; - enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit }; - public: - - EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) - : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),typename _LhsNested::Index(outer)), m_functor(xpr.functor()), m_outer(outer) - {} - - EIGEN_STRONG_INLINE Derived& operator++() - { - ++m_lhsIter; - return *static_cast(this); - } - - EIGEN_STRONG_INLINE Scalar value() const - { return m_functor(m_lhsIter.value(), - m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); } - - EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } - EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } - EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } - - EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; } - - protected: - RhsNested m_rhs; - LhsIterator m_lhsIter; - const BinaryFunc m_functor; - const Index m_outer; -}; - -// sparse - dense (product) -template -class sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Dense, Sparse> -{ - typedef scalar_product_op BinaryFunc; - typedef 
CwiseBinaryOp CwiseBinaryXpr; - typedef typename CwiseBinaryXpr::Scalar Scalar; - typedef typename CwiseBinaryXpr::Index Index; - typedef typename traits::_RhsNested _RhsNested; - typedef typename _RhsNested::InnerIterator RhsIterator; - - enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit }; - public: - - EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) - : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer) - {} - - EIGEN_STRONG_INLINE Derived& operator++() - { - ++m_rhsIter; - return *static_cast(this); - } - - EIGEN_STRONG_INLINE Scalar value() const - { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); } - - EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); } - EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); } - EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); } - - EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; } - - protected: - const CwiseBinaryXpr& m_xpr; - RhsIterator m_rhsIter; - const BinaryFunc& m_functor; - const Index m_outer; -}; - -} // end namespace internal - -#else // EIGEN_TEST_EVALUATORS - namespace internal { @@ -590,10 +340,6 @@ protected: } -#endif - - - /*************************************************************************** * Implementation of SparseMatrixBase and SparseCwise functions/operators ***************************************************************************/ diff --git a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h index 0e0f9f5f5..6036fd0a7 100644 --- a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +++ b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h @@ -11,136 +11,6 @@ #define EIGEN_SPARSE_CWISE_UNARY_OP_H namespace Eigen { - -#ifndef EIGEN_TEST_EVALUATORS - -template -class CwiseUnaryOpImpl - : public SparseMatrixBase > -{ - public: - - typedef CwiseUnaryOp Derived; - EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) - - class InnerIterator; - class ReverseInnerIterator; - - protected: - typedef typename internal::traits::_XprTypeNested _MatrixTypeNested; - typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; - typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator; -}; - -template -class CwiseUnaryOpImpl::InnerIterator - : public CwiseUnaryOpImpl::MatrixTypeIterator -{ - typedef typename CwiseUnaryOpImpl::Scalar Scalar; - typedef typename CwiseUnaryOpImpl::MatrixTypeIterator Base; - public: - - EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer) - : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) - {} - - EIGEN_STRONG_INLINE InnerIterator& operator++() - { Base::operator++(); return *this; } - - EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); } - - protected: - const UnaryOp m_functor; - private: - typename CwiseUnaryOpImpl::Scalar& valueRef(); -}; - -template -class CwiseUnaryOpImpl::ReverseInnerIterator - : public CwiseUnaryOpImpl::MatrixTypeReverseIterator -{ - typedef typename CwiseUnaryOpImpl::Scalar Scalar; - typedef typename CwiseUnaryOpImpl::MatrixTypeReverseIterator Base; - public: - - EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer) - : Base(unaryOp.derived().nestedExpression(),outer), 
m_functor(unaryOp.derived().functor()) - {} - - EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() - { Base::operator--(); return *this; } - - EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); } - - protected: - const UnaryOp m_functor; - private: - typename CwiseUnaryOpImpl::Scalar& valueRef(); -}; - -template -class CwiseUnaryViewImpl - : public SparseMatrixBase > -{ - public: - - class InnerIterator; - class ReverseInnerIterator; - - typedef CwiseUnaryView Derived; - EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) - - protected: - typedef typename internal::traits::_MatrixTypeNested _MatrixTypeNested; - typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; - typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator; -}; - -template -class CwiseUnaryViewImpl::InnerIterator - : public CwiseUnaryViewImpl::MatrixTypeIterator -{ - typedef typename CwiseUnaryViewImpl::Scalar Scalar; - typedef typename CwiseUnaryViewImpl::MatrixTypeIterator Base; - public: - - EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer) - : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) - {} - - EIGEN_STRONG_INLINE InnerIterator& operator++() - { Base::operator++(); return *this; } - - EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); } - EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); } - - protected: - const ViewOp m_functor; -}; - -template -class CwiseUnaryViewImpl::ReverseInnerIterator - : public CwiseUnaryViewImpl::MatrixTypeReverseIterator -{ - typedef typename CwiseUnaryViewImpl::Scalar Scalar; - typedef typename CwiseUnaryViewImpl::MatrixTypeReverseIterator Base; - public: - - EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer) - : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) - {} - - EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() - { Base::operator--(); return *this; } - - EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); } - EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); } - - protected: - const ViewOp m_functor; -}; - -#else // EIGEN_TEST_EVALUATORS namespace internal { @@ -291,8 +161,6 @@ class unary_evaluator, IteratorBased>::InnerItera } // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - template EIGEN_STRONG_INLINE Derived& SparseMatrixBase::operator*=(const Scalar& other) diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h index 803d98e2d..04c838a71 100644 --- a/Eigen/src/SparseCore/SparseDenseProduct.h +++ b/Eigen/src/SparseCore/SparseDenseProduct.h @@ -30,18 +30,10 @@ struct sparse_time_dense_product_impl::type Rhs; typedef typename internal::remove_all::type Res; typedef typename Lhs::Index Index; -#ifndef EIGEN_TEST_EVALUATORS - typedef typename Lhs::InnerIterator LhsInnerIterator; -#else typedef typename evaluator::InnerIterator LhsInnerIterator; -#endif static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { -#ifndef EIGEN_TEST_EVALUATORS - const Lhs &lhsEval(lhs); -#else - typename evaluator::type lhsEval(lhs); -#endif + typename evaluator::type 
lhsEval(lhs); for(Index c=0; c::type Rhs; typedef typename internal::remove_all::type Res; typedef typename Lhs::Index Index; -#ifndef EIGEN_TEST_EVALUATORS - typedef typename Lhs::InnerIterator LhsInnerIterator; -#else typedef typename evaluator::InnerIterator LhsInnerIterator; -#endif static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) { -#ifndef EIGEN_TEST_EVALUATORS - const Lhs &lhsEval(lhs); -#else - typename evaluator::type lhsEval(lhs); -#endif + typename evaluator::type lhsEval(lhs); for(Index c=0; c::type Rhs; typedef typename internal::remove_all::type Res; typedef typename Lhs::Index Index; -#ifndef EIGEN_TEST_EVALUATORS - typedef typename Lhs::InnerIterator LhsInnerIterator; -#else typedef typename evaluator::InnerIterator LhsInnerIterator; -#endif static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { -#ifndef EIGEN_TEST_EVALUATORS - const Lhs &lhsEval(lhs); -#else - typename evaluator::type lhsEval(lhs); -#endif + typename evaluator::type lhsEval(lhs); for(Index j=0; j::type Rhs; typedef typename internal::remove_all::type Res; typedef typename Lhs::Index Index; -#ifndef EIGEN_TEST_EVALUATORS - typedef typename Lhs::InnerIterator LhsInnerIterator; -#else typedef typename evaluator::InnerIterator LhsInnerIterator; -#endif static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { -#ifndef EIGEN_TEST_EVALUATORS - const Lhs &lhsEval(lhs); -#else - typename evaluator::type lhsEval(lhs); -#endif + typename evaluator::type lhsEval(lhs); for(Index j=0; j struct SparseDenseProductReturnType -{ - typedef SparseTimeDenseProduct Type; -}; - -template struct SparseDenseProductReturnType -{ - typedef typename internal::conditional< - Lhs::IsRowMajor, - SparseDenseOuterProduct, - SparseDenseOuterProduct >::type Type; -}; - -template struct DenseSparseProductReturnType -{ - typedef DenseTimeSparseProduct Type; -}; - -template struct DenseSparseProductReturnType -{ - typedef typename internal::conditional< - Rhs::IsRowMajor, - SparseDenseOuterProduct, - SparseDenseOuterProduct >::type Type; -}; - -namespace internal { - -template -struct traits > -{ - typedef Sparse StorageKind; - typedef typename scalar_product_traits::Scalar, - typename traits::Scalar>::ReturnType Scalar; - typedef typename Lhs::Index Index; - typedef typename Lhs::Nested LhsNested; - typedef typename Rhs::Nested RhsNested; - typedef typename remove_all::type _LhsNested; - typedef typename remove_all::type _RhsNested; - - enum { - LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost, - RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost, - - RowsAtCompileTime = Tr ? int(traits::RowsAtCompileTime) : int(traits::RowsAtCompileTime), - ColsAtCompileTime = Tr ? int(traits::ColsAtCompileTime) : int(traits::ColsAtCompileTime), - MaxRowsAtCompileTime = Tr ? int(traits::MaxRowsAtCompileTime) : int(traits::MaxRowsAtCompileTime), - MaxColsAtCompileTime = Tr ? int(traits::MaxColsAtCompileTime) : int(traits::MaxColsAtCompileTime), - - Flags = Tr ? 
RowMajorBit : 0, - - CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits::MulCost - }; -}; - -} // end namespace internal - -template -class SparseDenseOuterProduct - : public SparseMatrixBase > -{ - public: - - typedef SparseMatrixBase Base; - EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct) - typedef internal::traits Traits; - - private: - - typedef typename Traits::LhsNested LhsNested; - typedef typename Traits::RhsNested RhsNested; - typedef typename Traits::_LhsNested _LhsNested; - typedef typename Traits::_RhsNested _RhsNested; - - public: - - class InnerIterator; - - EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Lhs& lhs, const Rhs& rhs) - : m_lhs(lhs), m_rhs(rhs) - { - EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE); - } - - EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Rhs& rhs, const Lhs& lhs) - : m_lhs(lhs), m_rhs(rhs) - { - EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE); - } - - EIGEN_STRONG_INLINE Index rows() const { return Tr ? Index(m_rhs.rows()) : m_lhs.rows(); } - EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : Index(m_rhs.cols()); } - - EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } - EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } - - protected: - LhsNested m_lhs; - RhsNested m_rhs; -}; - -template -class SparseDenseOuterProduct::InnerIterator : public _LhsNested::InnerIterator -{ - typedef typename _LhsNested::InnerIterator Base; - typedef typename SparseDenseOuterProduct::Index Index; - public: - EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer) - : Base(prod.lhs(), 0), m_outer(outer), m_empty(false), m_factor(get(prod.rhs(), outer, typename internal::traits::StorageKind() )) - {} - - inline Index outer() const { return m_outer; } - inline Index row() const { return Transpose ? m_outer : Base::index(); } - inline Index col() const { return Transpose ? 
Base::index() : m_outer; } - - inline Scalar value() const { return Base::value() * m_factor; } - inline operator bool() const { return Base::operator bool() && !m_empty; } - - protected: - Scalar get(const _RhsNested &rhs, Index outer, Dense = Dense()) const - { - return rhs.coeff(outer); - } - - Scalar get(const _RhsNested &rhs, Index outer, Sparse = Sparse()) - { - typename Traits::_RhsNested::InnerIterator it(rhs, outer); - if (it && it.index()==0 && it.value()!=Scalar(0)) - return it.value(); - m_empty = true; - return Scalar(0); - } - - Index m_outer; - bool m_empty; - Scalar m_factor; -}; - -namespace internal { -template -struct traits > - : traits, Lhs, Rhs> > -{ - typedef Dense StorageKind; - typedef MatrixXpr XprKind; -}; - -} // end namespace internal - -template -class SparseTimeDenseProduct - : public ProductBase, Lhs, Rhs> -{ - public: - EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct) - - SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) - {} - - template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const - { - internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha); - } - - private: - SparseTimeDenseProduct& operator=(const SparseTimeDenseProduct&); -}; - - -// dense = dense * sparse -namespace internal { -template -struct traits > - : traits, Lhs, Rhs> > -{ - typedef Dense StorageKind; -}; -} // end namespace internal - -template -class DenseTimeSparseProduct - : public ProductBase, Lhs, Rhs> -{ - public: - EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseProduct) - - DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) - {} - - template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const - { - Transpose lhs_t(m_lhs); - Transpose rhs_t(m_rhs); - Transpose dest_t(dest); - internal::sparse_time_dense_product(rhs_t, lhs_t, dest_t, alpha); - } - - private: - DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&); -}; - -#endif // EIGEN_TEST_EVALUATORS - -#ifdef EIGEN_TEST_EVALUATORS - namespace internal { template @@ -514,8 +276,6 @@ struct product_evaluator, OuterProduct, DenseS } // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - } // end namespace Eigen #endif // EIGEN_SPARSEDENSEPRODUCT_H diff --git a/Eigen/src/SparseCore/SparseDiagonalProduct.h b/Eigen/src/SparseCore/SparseDiagonalProduct.h index 9f465a828..0cb2bd572 100644 --- a/Eigen/src/SparseCore/SparseDiagonalProduct.h +++ b/Eigen/src/SparseCore/SparseDiagonalProduct.h @@ -24,171 +24,8 @@ namespace Eigen { // for that particular case // The two other cases are symmetric. -#ifndef EIGEN_TEST_EVALUATORS - -namespace internal { - -template -struct traits > -{ - typedef typename remove_all::type _Lhs; - typedef typename remove_all::type _Rhs; - typedef typename _Lhs::Scalar Scalar; - // propagate the index type of the sparse matrix - typedef typename conditional< is_diagonal<_Lhs>::ret, - typename traits::Index, - typename traits::Index>::type Index; - typedef Sparse StorageKind; - typedef MatrixXpr XprKind; - enum { - RowsAtCompileTime = _Lhs::RowsAtCompileTime, - ColsAtCompileTime = _Rhs::ColsAtCompileTime, - - MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime, - MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime, - - SparseFlags = is_diagonal<_Lhs>::ret ? 
int(_Rhs::Flags) : int(_Lhs::Flags), - Flags = (SparseFlags&RowMajorBit), - CoeffReadCost = Dynamic - }; -}; - -enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor}; -template -class sparse_diagonal_product_inner_iterator_selector; - -} // end namespace internal - -template -class SparseDiagonalProduct - : public SparseMatrixBase >, - internal::no_assignment_operator -{ - typedef typename Lhs::Nested LhsNested; - typedef typename Rhs::Nested RhsNested; - - typedef typename internal::remove_all::type _LhsNested; - typedef typename internal::remove_all::type _RhsNested; - - enum { - LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal - : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor, - RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal - : (_RhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor - }; - - public: - - EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct) - - typedef internal::sparse_diagonal_product_inner_iterator_selector - <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator; - - // We do not want ReverseInnerIterator for diagonal-sparse products, - // but this dummy declaration is needed to make diag * sparse * diag compile. - class ReverseInnerIterator; - - EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs) - : m_lhs(lhs), m_rhs(rhs) - { - eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product"); - } - - EIGEN_STRONG_INLINE Index rows() const { return Index(m_lhs.rows()); } - EIGEN_STRONG_INLINE Index cols() const { return Index(m_rhs.cols()); } - - EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } - EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } - - protected: - LhsNested m_lhs; - RhsNested m_rhs; -}; -#endif - namespace internal { -#ifndef EIGEN_TEST_EVALUATORS - - - -template -class sparse_diagonal_product_inner_iterator_selector - - : public CwiseUnaryOp,const Rhs>::InnerIterator -{ - typedef typename CwiseUnaryOp,const Rhs>::InnerIterator Base; - typedef typename Rhs::Index Index; - public: - inline sparse_diagonal_product_inner_iterator_selector( - const SparseDiagonalProductType& expr, Index outer) - : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer) - {} -}; - -template -class sparse_diagonal_product_inner_iterator_selector - - : public CwiseBinaryOp< - scalar_product_op, - const typename Rhs::ConstInnerVectorReturnType, - const typename Lhs::DiagonalVectorType>::InnerIterator -{ - typedef typename CwiseBinaryOp< - scalar_product_op, - const typename Rhs::ConstInnerVectorReturnType, - const typename Lhs::DiagonalVectorType>::InnerIterator Base; - typedef typename Rhs::Index Index; - Index m_outer; - public: - inline sparse_diagonal_product_inner_iterator_selector( - const SparseDiagonalProductType& expr, Index outer) - : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0), m_outer(outer) - {} - - inline Index outer() const { return m_outer; } - inline Index col() const { return m_outer; } -}; - -template -class sparse_diagonal_product_inner_iterator_selector - - : public CwiseUnaryOp,const Lhs>::InnerIterator -{ - typedef typename CwiseUnaryOp,const Lhs>::InnerIterator Base; - typedef typename Lhs::Index Index; - public: - inline sparse_diagonal_product_inner_iterator_selector( - const SparseDiagonalProductType& expr, Index outer) - : 
Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer) - {} -}; - -template -class sparse_diagonal_product_inner_iterator_selector - - : public CwiseBinaryOp< - scalar_product_op, - const typename Lhs::ConstInnerVectorReturnType, - const Transpose >::InnerIterator -{ - typedef typename CwiseBinaryOp< - scalar_product_op, - const typename Lhs::ConstInnerVectorReturnType, - const Transpose >::InnerIterator Base; - typedef typename Lhs::Index Index; - Index m_outer; - public: - inline sparse_diagonal_product_inner_iterator_selector( - const SparseDiagonalProductType& expr, Index outer) - : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0), m_outer(outer) - {} - - inline Index outer() const { return m_outer; } - inline Index row() const { return m_outer; } -}; - -#else // EIGEN_TEST_EVALUATORS enum { SDP_AsScalarProduct, SDP_AsCwiseProduct @@ -303,23 +140,8 @@ protected: : SparseXprType::ColsAtCompileTime>::type m_diagCoeffNested; }; -#endif // EIGEN_TEST_EVALUATORS - - } // end namespace internal -#ifndef EIGEN_TEST_EVALUATORS - -// SparseMatrixBase functions -template -template -const SparseDiagonalProduct -SparseMatrixBase::operator*(const DiagonalBase &other) const -{ - return SparseDiagonalProduct(this->derived(), other.derived()); -} -#endif // EIGEN_TEST_EVALUATORS - } // end namespace Eigen #endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H diff --git a/Eigen/src/SparseCore/SparseDot.h b/Eigen/src/SparseCore/SparseDot.h index a63cb003c..b10c8058f 100644 --- a/Eigen/src/SparseCore/SparseDot.h +++ b/Eigen/src/SparseCore/SparseDot.h @@ -26,12 +26,8 @@ SparseMatrixBase::dot(const MatrixBase& other) const eigen_assert(size() == other.size()); eigen_assert(other.size()>0 && "you are using a non initialized vector"); -#ifndef EIGEN_TEST_EVALUATORS - typename Derived::InnerIterator i(derived(),0); -#else typename internal::evaluator::type thisEval(derived()); typename internal::evaluator::InnerIterator i(thisEval, 0); -#endif Scalar res(0); while (i) { @@ -54,24 +50,12 @@ SparseMatrixBase::dot(const SparseMatrixBase& other) cons eigen_assert(size() == other.size()); -#ifndef EIGEN_TEST_EVALUATORS - typedef typename Derived::Nested Nested; - typedef typename OtherDerived::Nested OtherNested; - typedef typename internal::remove_all::type NestedCleaned; - typedef typename internal::remove_all::type OtherNestedCleaned; - - Nested nthis(derived()); - OtherNested nother(other.derived()); - - typename NestedCleaned::InnerIterator i(nthis,0); - typename OtherNestedCleaned::InnerIterator j(nother,0); -#else typename internal::evaluator::type thisEval(derived()); typename internal::evaluator::InnerIterator i(thisEval, 0); typename internal::evaluator::type otherEval(other.derived()); typename internal::evaluator::InnerIterator j(otherEval, 0); -#endif + Scalar res(0); while (i && j) { diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h index 9e2dae1b3..72368ebf3 100644 --- a/Eigen/src/SparseCore/SparseMatrix.h +++ b/Eigen/src/SparseCore/SparseMatrix.h @@ -52,9 +52,6 @@ struct traits > MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic, Flags = _Options | NestByRefBit | LvalueBit, -#ifndef EIGEN_TEST_EVALUATORS - CoeffReadCost = NumTraits::ReadCost, -#endif SupportedAccessPatterns = InnerRandomAccessPattern }; }; @@ -77,9 +74,6 @@ struct traits, DiagIndex> MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = 1, Flags = 0 -#ifndef EIGEN_TEST_EVALUATORS - , CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10 -#endif }; }; @@ 
-653,13 +647,9 @@ class SparseMatrix EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) check_template_parameters(); -#ifndef EIGEN_TEST_EVALUATORS - *this = other.derived(); -#else const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator::Flags & RowMajorBit); if (needToTranspose) *this = other.derived(); else internal::call_assignment_no_alias(*this, other.derived()); -#endif } /** Constructs a sparse matrix from the sparse selfadjoint view \a other */ @@ -668,11 +658,7 @@ class SparseMatrix : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); -#ifndef EIGEN_TEST_EVALUATORS - *this = other; -#else Base::operator=(other); -#endif } /** Copy constructor (it performs a deep copy) */ @@ -737,19 +723,6 @@ class SparseMatrix } #ifndef EIGEN_PARSED_BY_DOXYGEN -#ifndef EIGEN_TEST_EVALUATORS - template - inline SparseMatrix& operator=(const SparseSparseProduct& product) - { return Base::operator=(product); } - - template - inline SparseMatrix& operator=(const ReturnByValue& other) - { - initAssignment(other); - return Base::operator=(other.derived()); - } -#endif // EIGEN_TEST_EVALUATORS - template inline SparseMatrix& operator=(const EigenBase& other) { return Base::operator=(other.derived()); } @@ -1071,69 +1044,6 @@ void SparseMatrix::sumupDuplicates() m_data.resize(m_outerIndex[m_outerSize]); } -#ifndef EIGEN_TEST_EVALUATORS -template -template -EIGEN_DONT_INLINE SparseMatrix& SparseMatrix::operator=(const SparseMatrixBase& other) -{ - EIGEN_STATIC_ASSERT((internal::is_same::value), - YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - - const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); - if (needToTranspose) - { - // two passes algorithm: - // 1 - compute the number of coeffs per dest inner vector - // 2 - do the actual copy/eval - // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed - typedef typename internal::nested::type OtherCopy; - typedef typename internal::remove_all::type _OtherCopy; - OtherCopy otherCopy(other.derived()); - - SparseMatrix dest(other.rows(),other.cols()); - Eigen::Map > (dest.m_outerIndex,dest.outerSize()).setZero(); - - // pass 1 - // FIXME the above copy could be merged with that pass - for (Index j=0; j positions(dest.outerSize()); - for (Index j=0; jswap(dest); - return *this; - } - else - { - if(other.isRValue()) - initAssignment(other.derived()); - // there is no special optimization - return Base::operator=(other.derived()); - } -} -#else template template EIGEN_DONT_INLINE SparseMatrix& SparseMatrix::operator=(const SparseMatrixBase& other) @@ -1199,7 +1109,6 @@ EIGEN_DONT_INLINE SparseMatrix& SparseMatrix EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col) @@ -1340,7 +1249,6 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse return (m_data.value(p) = 0); } -#ifdef EIGEN_ENABLE_EVALUATORS namespace internal { template @@ -1370,7 +1278,6 @@ struct evaluator > }; } -#endif } // end namespace Eigen diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h index aff4c8b6f..b5c50d93a 100644 --- a/Eigen/src/SparseCore/SparseMatrixBase.h +++ b/Eigen/src/SparseCore/SparseMatrixBase.h @@ 
-79,13 +79,6 @@ template class SparseMatrixBase : public EigenBase * constructed from this one. See the \ref flags "list of flags". */ -#ifndef EIGEN_TEST_EVALUATORS - CoeffReadCost = internal::traits::CoeffReadCost, - /**< This is a rough measure of how expensive it is to read one coefficient from - * this expression. - */ -#endif - IsRowMajor = Flags&RowMajorBit ? 1 : 0, InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) @@ -189,11 +182,6 @@ template class SparseMatrixBase : public EigenBase public: -#ifndef EIGEN_TEST_EVALUATORS - template - inline Derived& operator=(const SparseSparseProduct& product); -#endif - friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m) { typedef typename Derived::Nested Nested; @@ -265,36 +253,6 @@ template class SparseMatrixBase : public EigenBase EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE cwiseProduct(const MatrixBase &other) const; -#ifndef EIGEN_TEST_EVALUATORS - // sparse * sparse - template - const typename SparseSparseProductReturnType::Type - operator*(const SparseMatrixBase &other) const; - - // sparse * diagonal - template - const SparseDiagonalProduct - operator*(const DiagonalBase &other) const; - - // diagonal * sparse - template friend - const SparseDiagonalProduct - operator*(const DiagonalBase &lhs, const SparseMatrixBase& rhs) - { return SparseDiagonalProduct(lhs.derived(), rhs.derived()); } - - /** dense * sparse (return a dense object unless it is an outer product) */ - template friend - const typename DenseSparseProductReturnType::Type - operator*(const MatrixBase& lhs, const Derived& rhs) - { return typename DenseSparseProductReturnType::Type(lhs.derived(),rhs); } - - /** sparse * dense (returns a dense object unless it is an outer product) */ - template - const typename SparseDenseProductReturnType::Type - operator*(const MatrixBase &other) const - { return typename SparseDenseProductReturnType::Type(derived(), other.derived()); } - -#else // EIGEN_TEST_EVALUATORS // sparse * diagonal template const Product @@ -323,7 +281,6 @@ template class SparseMatrixBase : public EigenBase const Product operator*(const MatrixBase &lhs, const SparseMatrixBase& rhs) { return Product(lhs.derived(), rhs.derived()); } -#endif // EIGEN_TEST_EVALUATORS /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */ SparseSymmetricPermutationProduct twistedBy(const PermutationMatrix& perm) const @@ -360,18 +317,6 @@ template class SparseMatrixBase : public EigenBase Block innerVectors(Index outerStart, Index outerSize); const Block innerVectors(Index outerStart, Index outerSize) const; -#ifndef EIGEN_TEST_EVALUATORS - /** \internal use operator= */ - template - void evalTo(MatrixBase& dst) const - { - dst.setZero(); - for (Index j=0; j toDense() const { return derived(); diff --git a/Eigen/src/SparseCore/SparsePermutation.h b/Eigen/src/SparseCore/SparsePermutation.h index a888ae9e1..228796bf8 100644 --- a/Eigen/src/SparseCore/SparsePermutation.h +++ b/Eigen/src/SparseCore/SparsePermutation.h @@ -103,48 +103,6 @@ struct permut_sparsematrix_product_retval } -#ifndef EIGEN_TEST_EVALUATORS - -/** \returns the matrix with the permutation applied to the columns - */ -template -inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, false> -operator*(const SparseMatrixBase& matrix, const PermutationBase& perm) -{ - return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, false>(perm, matrix.derived()); -} - -/** 
\returns the matrix with the permutation applied to the rows - */ -template -inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, false> -operator*( const PermutationBase& perm, const SparseMatrixBase& matrix) -{ - return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, false>(perm, matrix.derived()); -} - - - -/** \returns the matrix with the inverse permutation applied to the columns. - */ -template -inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, true> -operator*(const SparseMatrixBase& matrix, const Transpose >& tperm) -{ - return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, true>(tperm.nestedPermutation(), matrix.derived()); -} - -/** \returns the matrix with the inverse permutation applied to the rows. - */ -template -inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, true> -operator*(const Transpose >& tperm, const SparseMatrixBase& matrix) -{ - return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, true>(tperm.nestedPermutation(), matrix.derived()); -} - -#else // EIGEN_TEST_EVALUATORS - namespace internal { template struct product_promote_storage_type { typedef Sparse ret; }; @@ -274,8 +232,6 @@ operator*(const Transpose >& tperm, const SparseMat return Product >, SparseDerived>(tperm, matrix.derived()); } -#endif // EIGEN_TEST_EVALUATORS - } // end namespace Eigen #endif // EIGEN_SPARSE_SELFADJOINTVIEW_H diff --git a/Eigen/src/SparseCore/SparseProduct.h b/Eigen/src/SparseCore/SparseProduct.h index 18f40b9d9..b68fe986a 100644 --- a/Eigen/src/SparseCore/SparseProduct.h +++ b/Eigen/src/SparseCore/SparseProduct.h @@ -12,182 +12,6 @@ namespace Eigen { -#ifndef EIGEN_TEST_EVALUATORS - -template -struct SparseSparseProductReturnType -{ - typedef typename internal::traits::Scalar Scalar; - typedef typename internal::traits::Index Index; - enum { - LhsRowMajor = internal::traits::Flags & RowMajorBit, - RhsRowMajor = internal::traits::Flags & RowMajorBit, - TransposeRhs = (!LhsRowMajor) && RhsRowMajor, - TransposeLhs = LhsRowMajor && (!RhsRowMajor) - }; - - typedef typename internal::conditional, - typename internal::nested::type>::type LhsNested; - - typedef typename internal::conditional, - typename internal::nested::type>::type RhsNested; - - typedef SparseSparseProduct Type; -}; - -namespace internal { -template -struct traits > -{ - typedef MatrixXpr XprKind; - // clean the nested types: - typedef typename remove_all::type _LhsNested; - typedef typename remove_all::type _RhsNested; - typedef typename _LhsNested::Scalar Scalar; - typedef typename promote_index_type::Index, - typename traits<_RhsNested>::Index>::type Index; - - enum { - LhsCoeffReadCost = _LhsNested::CoeffReadCost, - RhsCoeffReadCost = _RhsNested::CoeffReadCost, - LhsFlags = _LhsNested::Flags, - RhsFlags = _RhsNested::Flags, - - RowsAtCompileTime = _LhsNested::RowsAtCompileTime, - ColsAtCompileTime = _RhsNested::ColsAtCompileTime, - MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime, - MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime, - - InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime), - - EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit), - - RemovedBits = ~(EvalToRowMajor ? 
0 : RowMajorBit), - - Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits) - | EvalBeforeAssigningBit - | EvalBeforeNestingBit, - - CoeffReadCost = Dynamic - }; - - typedef Sparse StorageKind; -}; - -} // end namespace internal - -template -class SparseSparseProduct : internal::no_assignment_operator, - public SparseMatrixBase > -{ - public: - - typedef SparseMatrixBase Base; - EIGEN_DENSE_PUBLIC_INTERFACE(SparseSparseProduct) - - private: - - typedef typename internal::traits::_LhsNested _LhsNested; - typedef typename internal::traits::_RhsNested _RhsNested; - - public: - - template - EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs) - : m_lhs(lhs), m_rhs(rhs), m_tolerance(0), m_conservative(true) - { - init(); - } - - template - EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, const RealScalar& tolerance) - : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false) - { - init(); - } - - SparseSparseProduct pruned(const Scalar& reference = 0, const RealScalar& epsilon = NumTraits::dummy_precision()) const - { - using std::abs; - return SparseSparseProduct(m_lhs,m_rhs,abs(reference)*epsilon); - } - - template - void evalTo(Dest& result) const - { - if(m_conservative) - internal::conservative_sparse_sparse_product_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result); - else - internal::sparse_sparse_product_with_pruning_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result,m_tolerance); - } - - EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } - EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); } - - EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } - EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } - - protected: - void init() - { - eigen_assert(m_lhs.cols() == m_rhs.rows()); - - enum { - ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic - || _RhsNested::RowsAtCompileTime==Dynamic - || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime), - AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime, - SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested) - }; - // note to the lost user: - // * for a dot product use: v1.dot(v2) - // * for a coeff-wise product use: v1.cwise()*v2 - EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), - INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) - EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), - INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) - EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) - } - - LhsNested m_lhs; - RhsNested m_rhs; - RealScalar m_tolerance; - bool m_conservative; -}; - -// sparse = sparse * sparse -template -template -inline Derived& SparseMatrixBase::operator=(const SparseSparseProduct& product) -{ - product.evalTo(derived()); - return derived(); -} - -/** \returns an expression of the product of two sparse matrices. - * By default a conservative product preserving the symbolic non zeros is performed. 
- * The automatic pruning of the small values can be achieved by calling the pruned() function - * in which case a totally different product algorithm is employed: - * \code - * C = (A*B).pruned(); // supress numerical zeros (exact) - * C = (A*B).pruned(ref); - * C = (A*B).pruned(ref,epsilon); - * \endcode - * where \c ref is a meaningful non zero reference value. - * */ -template -template -inline const typename SparseSparseProductReturnType::Type -SparseMatrixBase::operator*(const SparseMatrixBase &other) const -{ - return typename SparseSparseProductReturnType::Type(derived(), other.derived()); -} - -#else // EIGEN_TEST_EVALUATORS - - /** \returns an expression of the product of two sparse matrices. * By default a conservative product preserving the symbolic non zeros is performed. * The automatic pruning of the small values can be achieved by calling the pruned() function @@ -256,8 +80,6 @@ protected: } // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - } // end namespace Eigen #endif // EIGEN_SPARSEPRODUCT_H diff --git a/Eigen/src/SparseCore/SparseRedux.h b/Eigen/src/SparseCore/SparseRedux.h index cf78d0e91..763f2296b 100644 --- a/Eigen/src/SparseCore/SparseRedux.h +++ b/Eigen/src/SparseCore/SparseRedux.h @@ -18,14 +18,9 @@ SparseMatrixBase::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); Scalar res(0); -#ifndef EIGEN_TEST_EVALUATORS - for (Index j=0; j::type thisEval(derived()); for (Index j=0; j::InnerIterator iter(thisEval,j); iter; ++iter) -#endif res += iter.value(); return res; } diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h index 4235d6c4c..69ac1a398 100644 --- a/Eigen/src/SparseCore/SparseSelfAdjointView.h +++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h @@ -11,16 +11,6 @@ #define EIGEN_SPARSE_SELFADJOINTVIEW_H namespace Eigen { - -#ifndef EIGEN_TEST_EVALUATORS - -template -class SparseSelfAdjointTimeDenseProduct; - -template -class DenseTimeSparseSelfAdjointProduct; - -#endif // EIGEN_TEST_EVALUATORS /** \ingroup SparseCore_Module * \class SparseSelfAdjointView @@ -80,76 +70,40 @@ template class SparseSelfAdjointView * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product. * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product. */ -#ifndef EIGEN_TEST_EVALUATORS - template - SparseSparseProduct - operator*(const SparseMatrixBase& rhs) const - { - return SparseSparseProduct(*this, rhs.derived()); - } -#else template Product operator*(const SparseMatrixBase& rhs) const { return Product(*this, rhs.derived()); } -#endif /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs. * * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product. * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product. 
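For illustration, a minimal usage sketch of these self-adjoint sparse products; the matrices, the vector and the size n below are hypothetical placeholders, and A is assumed symmetric with its lower triangle stored:

    #include <Eigen/SparseCore>
    using namespace Eigen;

    void selfadjoint_product_demo(int n)
    {
      SparseMatrix<double> A(n,n), B(n,n);   // fill A (lower triangle) and B as needed
      VectorXd x = VectorXd::Random(n);
      SparseMatrix<double> C = A.selfadjointView<Lower>() * B;  // sparse self-adjoint * sparse (via a temporary, as noted above)
      VectorXd y = A.selfadjointView<Lower>() * x;              // efficient self-adjoint * dense vector
    }
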
*/ -#ifndef EIGEN_TEST_EVALUATORS - template friend - SparseSparseProduct - operator*(const SparseMatrixBase& lhs, const SparseSelfAdjointView& rhs) - { - return SparseSparseProduct(lhs.derived(), rhs); - } -#else // EIGEN_TEST_EVALUATORS template friend Product operator*(const SparseMatrixBase& lhs, const SparseSelfAdjointView& rhs) { return Product(lhs.derived(), rhs); } -#endif // EIGEN_TEST_EVALUATORS /** Efficient sparse self-adjoint matrix times dense vector/matrix product */ -#ifndef EIGEN_TEST_EVALUATORS - template - SparseSelfAdjointTimeDenseProduct - operator*(const MatrixBase& rhs) const - { - return SparseSelfAdjointTimeDenseProduct(m_matrix, rhs.derived()); - } -#else template Product operator*(const MatrixBase& rhs) const { return Product(*this, rhs.derived()); } -#endif /** Efficient dense vector/matrix times sparse self-adjoint matrix product */ -#ifndef EIGEN_TEST_EVALUATORS - template friend - DenseTimeSparseSelfAdjointProduct - operator*(const MatrixBase& lhs, const SparseSelfAdjointView& rhs) - { - return DenseTimeSparseSelfAdjointProduct(lhs.derived(), rhs.m_matrix); - } -#else template friend Product operator*(const MatrixBase& lhs, const SparseSelfAdjointView& rhs) { return Product(lhs.derived(), rhs); } -#endif /** Perform a symmetric rank K update of the selfadjoint matrix \c *this: * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix. @@ -177,7 +131,7 @@ template class SparseSelfAdjointView } /** \returns an expression of P H P^-1 */ -// #ifndef EIGEN_TEST_EVALUATORS + // TODO implement twists in a more evaluator friendly fashion SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix& perm) const { return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm); @@ -189,7 +143,6 @@ template class SparseSelfAdjointView permutedMatrix.evalTo(*this); return *this; } -// #endif // EIGEN_TEST_EVALUATORS SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src) { @@ -251,98 +204,6 @@ SparseSelfAdjointView::rankUpdate(const SparseMatrixBase -struct traits > - : traits, Lhs, Rhs> > -{ - typedef Dense StorageKind; -}; -} - -template -class SparseSelfAdjointTimeDenseProduct - : public ProductBase, Lhs, Rhs> -{ - public: - EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct) - - SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) - {} - - template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const - { - EIGEN_ONLY_USED_FOR_DEBUG(alpha); - // TODO use alpha - eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry"); - typedef typename internal::remove_all::type _Lhs; - typedef typename _Lhs::InnerIterator LhsInnerIterator; - enum { - LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit, - ProcessFirstHalf = - ((Mode&(Upper|Lower))==(Upper|Lower)) - || ( (Mode&Upper) && !LhsIsRowMajor) - || ( (Mode&Lower) && LhsIsRowMajor), - ProcessSecondHalf = !ProcessFirstHalf - }; - for (typename _Lhs::Index j=0; j -struct traits > - : traits, Lhs, Rhs> > -{}; -} - -template -class DenseTimeSparseSelfAdjointProduct - : public ProductBase, Lhs, Rhs> -{ - public: - EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct) - - DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) - {} - - template void scaleAndAddTo(Dest& /*dest*/, const Scalar& /*alpha*/) const - { - // TODO - } - - private: - DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&); -}; - -#else // 
EIGEN_TEST_EVALUATORS - namespace internal { template @@ -486,8 +347,6 @@ protected: } // namespace internal -#endif // EIGEN_TEST_EVALUATORS - /*************************************************************************** * Implementation of symmetric copies and permutations ***************************************************************************/ @@ -644,7 +503,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix(this); } const Derived& derived() const { return *static_cast(this); } -#ifdef EIGEN_TEST_EVALUATORS /** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A. * * \sa compute() @@ -91,17 +90,15 @@ class SparseSolverBase : internal::noncopyable eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b"); return Solve(derived(), b.derived()); } -#endif - - -#ifndef EIGEN_PARSED_BY_DOXYGEN + + #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal default implementation of solving with a sparse rhs */ template void _solve_impl(const SparseMatrixBase &b, SparseMatrixBase &dest) const { internal::solve_sparse_through_dense_panels(derived(), b.derived(), dest.derived()); } -#endif // EIGEN_PARSED_BY_DOXYGEN + #endif // EIGEN_PARSED_BY_DOXYGEN protected: diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h index b42b33e55..f291f8cef 100644 --- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h @@ -47,10 +47,8 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r else res.resize(rows, cols); - #ifdef EIGEN_TEST_EVALUATORS typename evaluator::type lhsEval(lhs); typename evaluator::type rhsEval(rhs); - #endif res.reserve(estimated_nnz_prod); double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols()); @@ -61,20 +59,12 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r // let's do a more accurate determination of the nnz ratio for the current column j of res tempVector.init(ratioColRes); tempVector.setZero(); -#ifndef EIGEN_TEST_EVALUATORS - for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt) -#else for (typename evaluator::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt) -#endif { // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index()) tempVector.restart(); Scalar x = rhsIt.value(); -#ifndef EIGEN_TEST_EVALUATORS - for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt) -#else for (typename evaluator::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt) -#endif { tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x; } @@ -153,10 +143,6 @@ struct sparse_sparse_product_with_pruning_selector struct sparse_sparse_product_with_pruning_selector { @@ -204,7 +190,6 @@ struct sparse_sparse_product_with_pruning_selector(colLhs, rhs, res, tolerance); } }; -#endif } // end namespace internal diff --git a/Eigen/src/SparseCore/SparseTranspose.h b/Eigen/src/SparseCore/SparseTranspose.h index f5eff6133..fae7cae97 100644 --- a/Eigen/src/SparseCore/SparseTranspose.h +++ b/Eigen/src/SparseCore/SparseTranspose.h @@ -12,55 +12,6 @@ namespace Eigen { -#ifndef EIGEN_TEST_EVALUATORS -template class TransposeImpl - : public SparseMatrixBase > -{ - typedef typename internal::remove_all::type _MatrixTypeNested; - public: - - EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose ) - - class InnerIterator; - class ReverseInnerIterator; - - inline 
Index nonZeros() const { return derived().nestedExpression().nonZeros(); } -}; - -// NOTE: VC10 trigger an ICE if don't put typename TransposeImpl:: in front of Index, -// a typedef typename TransposeImpl::Index Index; -// does not fix the issue. -// An alternative is to define the nested class in the parent class itself. -template class TransposeImpl::InnerIterator - : public _MatrixTypeNested::InnerIterator -{ - typedef typename _MatrixTypeNested::InnerIterator Base; - typedef typename TransposeImpl::Index Index; - public: - - EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl::Index outer) - : Base(trans.derived().nestedExpression(), outer) - {} - Index row() const { return Base::col(); } - Index col() const { return Base::row(); } -}; - -template class TransposeImpl::ReverseInnerIterator - : public _MatrixTypeNested::ReverseInnerIterator -{ - typedef typename _MatrixTypeNested::ReverseInnerIterator Base; - typedef typename TransposeImpl::Index Index; - public: - - EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl::Index outer) - : Base(xpr.derived().nestedExpression(), outer) - {} - Index row() const { return Base::col(); } - Index col() const { return Base::row(); } -}; - -#else // EIGEN_TEST_EVALUATORS - // Implement nonZeros() for transpose. I'm not sure that's the best approach for that. // Perhaps it should be implemented in Transpose<> itself. template class TransposeImpl @@ -119,8 +70,6 @@ struct unary_evaluator, IteratorBased> } // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - } // end namespace Eigen #endif // EIGEN_SPARSETRANSPOSE_H diff --git a/Eigen/src/SparseCore/SparseTriangularView.h b/Eigen/src/SparseCore/SparseTriangularView.h index 3df7a4cd4..744c3d730 100644 --- a/Eigen/src/SparseCore/SparseTriangularView.h +++ b/Eigen/src/SparseCore/SparseTriangularView.h @@ -40,11 +40,6 @@ protected: typedef typename internal::remove_reference::type MatrixTypeNestedNonRef; typedef typename internal::remove_all::type MatrixTypeNestedCleaned; -#ifndef EIGEN_TEST_EVALUATORS - template - typename internal::plain_matrix_type_column_major::type - solve(const MatrixBase& other) const; -#else // EIGEN_TEST_EVALUATORS template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const { @@ -52,7 +47,6 @@ protected: dst = rhs; this->solveInPlace(dst); } -#endif // EIGEN_TEST_EVALUATORS template void solveInPlace(MatrixBase& other) const; template void solveInPlace(SparseMatrixBase& other) const; @@ -163,7 +157,6 @@ class TriangularViewImpl::ReverseInnerIterator : public } }; -#ifdef EIGEN_TEST_EVALUATORS namespace internal { template @@ -270,7 +263,6 @@ protected: }; } // end namespace internal -#endif template template diff --git a/Eigen/src/SparseCore/SparseUtil.h b/Eigen/src/SparseCore/SparseUtil.h index 28bf89bca..8de227b88 100644 --- a/Eigen/src/SparseCore/SparseUtil.h +++ b/Eigen/src/SparseCore/SparseUtil.h @@ -43,26 +43,6 @@ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) -#ifndef EIGEN_TEST_EVALUATORS - -#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \ - typedef BaseClass Base; \ - typedef typename Eigen::internal::traits::Scalar Scalar; \ - typedef typename Eigen::NumTraits::Real RealScalar; \ - typedef typename Eigen::internal::nested::type Nested; \ - typedef typename Eigen::internal::traits::StorageKind StorageKind; \ 
- typedef typename Eigen::internal::traits::Index Index; \ - enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ - ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ - Flags = Eigen::internal::traits::Flags, \ - CoeffReadCost = Eigen::internal::traits::CoeffReadCost, \ - SizeAtCompileTime = Base::SizeAtCompileTime, \ - IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \ - using Base::derived; \ - using Base::const_cast_derived; - -#else // EIGEN_TEST_EVALUATORS - #define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \ typedef BaseClass Base; \ typedef typename Eigen::internal::traits::Scalar Scalar; \ @@ -78,8 +58,6 @@ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) using Base::derived; \ using Base::const_cast_derived; -#endif // EIGEN_TEST_EVALUATORS - #define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \ _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase) @@ -156,13 +134,11 @@ template struct plain_matrix_type typedef SparseMatrix<_Scalar, _Options, _Index> type; }; -#ifdef EIGEN_TEST_EVALUATORS template struct solve_traits { typedef typename sparse_eval::type PlainObject; }; -#endif template struct generic_xpr_base diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h index 0f9aa9dd1..c9f9d61e9 100644 --- a/Eigen/src/SparseCore/SparseVector.h +++ b/Eigen/src/SparseCore/SparseVector.h @@ -422,30 +422,6 @@ class SparseVector::ReverseInnerIterator namespace internal { -#ifndef EIGEN_TEST_EVALUATORS -template< typename Dest, typename Src> -struct sparse_vector_assign_selector { - static void run(Dest& dst, const Src& src) { - eigen_internal_assert(src.innerSize()==src.size()); - for(typename Src::InnerIterator it(src, 0); it; ++it) - dst.insert(it.index()) = it.value(); - } -}; - -template< typename Dest, typename Src> -struct sparse_vector_assign_selector { - static void run(Dest& dst, const Src& src) { - eigen_internal_assert(src.outerSize()==src.size()); - for(typename Dest::Index i=0; i struct evaluator > : evaluator_base > @@ -492,7 +468,6 @@ struct sparse_vector_assign_selector { } } }; -#endif // EIGEN_TEST_EVALUATORS template< typename Dest, typename Src> struct sparse_vector_assign_selector { diff --git a/Eigen/src/SparseCore/SparseView.h b/Eigen/src/SparseCore/SparseView.h index c1c50f6e9..d10cc5a35 100644 --- a/Eigen/src/SparseCore/SparseView.h +++ b/Eigen/src/SparseCore/SparseView.h @@ -40,10 +40,6 @@ public: RealScalar m_epsilon = NumTraits::dummy_precision()) : m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {} -#ifndef EIGEN_TEST_EVALUATORS - class InnerIterator; -#endif // EIGEN_TEST_EVALUATORS - inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } @@ -63,43 +59,6 @@ protected: RealScalar m_epsilon; }; -#ifndef EIGEN_TEST_EVALUATORS -template -class SparseView::InnerIterator : public _MatrixTypeNested::InnerIterator -{ - typedef typename SparseView::Index Index; -public: - typedef typename _MatrixTypeNested::InnerIterator IterBase; - InnerIterator(const SparseView& view, Index outer) : - IterBase(view.m_matrix, outer), m_view(view) - { - incrementToNonZero(); - } - - EIGEN_STRONG_INLINE InnerIterator& operator++() - { - IterBase::operator++(); - incrementToNonZero(); - return *this; - } - - using IterBase::value; - -protected: - const SparseView& m_view; - -private: - void incrementToNonZero() - { - while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.reference(), m_view.epsilon())) - { - 
IterBase::operator++(); - } - } -}; - -#else // EIGEN_TEST_EVALUATORS - namespace internal { // TODO find a way to unify the two following variants @@ -230,8 +189,6 @@ struct unary_evaluator, IndexBased> } // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - template const SparseView MatrixBase::sparseView(const Scalar& reference, const typename NumTraits::Real& epsilon) const diff --git a/Eigen/src/SparseCore/TriangularSolver.h b/Eigen/src/SparseCore/TriangularSolver.h index 012a1bb75..98062e9c6 100644 --- a/Eigen/src/SparseCore/TriangularSolver.h +++ b/Eigen/src/SparseCore/TriangularSolver.h @@ -23,149 +23,6 @@ template::Flags) & RowMajorBit> struct sparse_solve_triangular_selector; -#ifndef EIGEN_TEST_EVALUATORS -// forward substitution, row-major -template -struct sparse_solve_triangular_selector -{ - typedef typename Rhs::Scalar Scalar; - typedef typename Lhs::Index Index; - static void run(const Lhs& lhs, Rhs& other) - { - for(Index col=0 ; col -struct sparse_solve_triangular_selector -{ - typedef typename Rhs::Scalar Scalar; - typedef typename Lhs::Index Index; - static void run(const Lhs& lhs, Rhs& other) - { - for(Index col=0 ; col=0 ; --i) - { - Scalar tmp = other.coeff(i,col); - Scalar l_ii = 0; - typename Lhs::InnerIterator it(lhs, i); - while(it && it.index() -struct sparse_solve_triangular_selector -{ - typedef typename Rhs::Scalar Scalar; - typedef typename Lhs::Index Index; - static void run(const Lhs& lhs, Rhs& other) - { - for(Index col=0 ; col -struct sparse_solve_triangular_selector -{ - typedef typename Rhs::Scalar Scalar; - typedef typename Lhs::Index Index; - static void run(const Lhs& lhs, Rhs& other) - { - for(Index col=0 ; col=0; --i) - { - Scalar& tmp = other.coeffRef(i,col); - if (tmp!=Scalar(0)) // optimization when other is actually sparse - { - if(!(Mode & UnitDiag)) - { - // TODO replace this by a binary search. make sure the binary search is safe for partially sorted elements - typename Lhs::ReverseInnerIterator it(lhs, i); - while(it && it.index()!=i) - --it; - eigen_assert(it && it.index()==i); - other.coeffRef(i,col) /= it.value(); - } - typename Lhs::InnerIterator it(lhs, i); - for(; it && it.index() struct sparse_solve_triangular_selector @@ -316,7 +173,6 @@ struct sparse_solve_triangular_selector } }; -#endif // EIGEN_TEST_EVALUATORS } // end namespace internal template @@ -338,18 +194,6 @@ void TriangularViewImpl::solveInPlace(MatrixBase -template -typename internal::plain_matrix_type_column_major::type -TriangularViewImpl::solve(const MatrixBase& other) const -{ - typename internal::plain_matrix_type_column_major::type res(other); - solveInPlace(res); - return res; -} -#endif - // pure sparse path namespace internal { diff --git a/Eigen/src/SparseLU/SparseLU.h b/Eigen/src/SparseLU/SparseLU.h index fb01f99cd..14d7e713e 100644 --- a/Eigen/src/SparseLU/SparseLU.h +++ b/Eigen/src/SparseLU/SparseLU.h @@ -173,36 +173,6 @@ class SparseLU : public SparseSolverBase >, m_diagpivotthresh = thresh; } -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. - * - * \warning the destination matrix X in X = this->solve(B) must be colmun-major. 
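For illustration, a minimal sketch of the workflow this solve() belongs to, assuming a square, compressed, column-major SparseMatrix A and a dense right-hand side b (both hypothetical):

    #include <Eigen/SparseLU>
    using namespace Eigen;

    void sparselu_demo(const SparseMatrix<double>& A, const VectorXd& b)
    {
      SparseLU<SparseMatrix<double> > lu;   // default ordering (COLAMD)
      lu.compute(A);                        // analyzePattern() followed by factorize()
      if(lu.info() != Success) return;      // factorization failed
      VectorXd x = lu.solve(b);             // column-major destination, as the warning above requires
    }
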
- * - * \sa compute() - */ - template - inline const internal::solve_retval solve(const MatrixBase& B) const - { - eigen_assert(m_factorizationIsOk && "SparseLU is not initialized."); - eigen_assert(rows()==B.rows() - && "SparseLU::solve(): invalid number of rows of the right hand side matrix B"); - return internal::solve_retval(*this, B.derived()); - } - - /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::sparse_solve_retval solve(const SparseMatrixBase& B) const - { - eigen_assert(m_factorizationIsOk && "SparseLU is not initialized."); - eigen_assert(rows()==B.rows() - && "SparseLU::solve(): invalid number of rows of the right hand side matrix B"); - return internal::sparse_solve_retval(*this, B.derived()); - } -#else - #ifdef EIGEN_PARSED_BY_DOXYGEN /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. * @@ -214,8 +184,6 @@ class SparseLU : public SparseSolverBase >, inline const Solve solve(const MatrixBase& B) const; #endif // EIGEN_PARSED_BY_DOXYGEN -#endif // EIGEN_TEST_EVALUATORS - /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, @@ -749,37 +717,6 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator const MatrixUType& m_mapU; }; -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef SparseLU<_MatrixType,Derived> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -template -struct sparse_solve_retval, Rhs> - : sparse_solve_retval_base, Rhs> -{ - typedef SparseLU<_MatrixType,Derived> Dec; - EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - this->defaultEvalTo(dst); - } -}; -} // end namespace internal -#endif - } // End namespace Eigen #endif diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h index 8e946b045..1a6c84e00 100644 --- a/Eigen/src/SparseQR/SparseQR.h +++ b/Eigen/src/SparseQR/SparseQR.h @@ -202,26 +202,6 @@ class SparseQR : public SparseSolverBase > m_threshold = threshold; } -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::solve_retval solve(const MatrixBase& B) const - { - eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); - eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); - return internal::solve_retval(*this, B.derived()); - } - template - inline const internal::sparse_solve_retval solve(const SparseMatrixBase& B) const - { - eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); - eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); - return internal::sparse_solve_retval(*this, B.derived()); - } -#else /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. 
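For illustration, a hypothetical least-squares use of this solve(); A (possibly rectangular, assumed compressed and column-major), b and the chosen ordering are placeholders:

    #include <Eigen/SparseQR>
    using namespace Eigen;

    void sparseqr_demo(const SparseMatrix<double>& A, const VectorXd& b)
    {
      SparseQR<SparseMatrix<double>, COLAMDOrdering<int> > qr(A);
      if(qr.info() != Success) return;
      VectorXd x = qr.solve(b);   // minimizes ||A*x - b|| in the least-squares sense
    }
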
* * \sa compute() @@ -240,7 +220,6 @@ class SparseQR : public SparseSolverBase > eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); return Solve(*this, B.derived()); } -#endif // EIGEN_TEST_EVALUATORS /** \brief Reports whether previous computation was successful. * @@ -577,36 +556,6 @@ void SparseQR::factorize(const MatrixType& mat) m_info = Success; } -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef SparseQR<_MatrixType,OrderingType> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; -template -struct sparse_solve_retval, Rhs> - : sparse_solve_retval_base, Rhs> -{ - typedef SparseQR<_MatrixType, OrderingType> Dec; - EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec, Rhs) - - template void evalTo(Dest& dst) const - { - this->defaultEvalTo(dst); - } -}; -} // end namespace internal -#endif // EIGEN_TEST_EVALUATORS - template struct SparseQR_QProduct : ReturnByValue > { diff --git a/Eigen/src/SuperLUSupport/SuperLUSupport.h b/Eigen/src/SuperLUSupport/SuperLUSupport.h index fcecd4fcf..0137585ca 100644 --- a/Eigen/src/SuperLUSupport/SuperLUSupport.h +++ b/Eigen/src/SuperLUSupport/SuperLUSupport.h @@ -336,34 +336,6 @@ class SuperLUBase : public SparseSolverBase derived().analyzePattern(matrix); derived().factorize(matrix); } - -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::solve_retval solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "SuperLU is not initialized."); - eigen_assert(rows()==b.rows() - && "SuperLU::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } - - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::sparse_solve_retval solve(const SparseMatrixBase& b) const - { - eigen_assert(m_isInitialized && "SuperLU is not initialized."); - eigen_assert(rows()==b.rows() - && "SuperLU::solve(): invalid number of rows of the right hand side matrix b"); - return internal::sparse_solve_retval(*this, b.derived()); - } -#endif // EIGEN_TEST_EVALUATORS /** Performs a symbolic decomposition on the sparcity of \a matrix. 
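For illustration, the analyzePattern()/factorize() split this interface shares with Eigen's other sparse solvers, sketched here with the built-in SimplicialLDLT; A1 and A2 are hypothetical matrices with the same sparsity pattern but different values:

    #include <Eigen/SparseCholesky>
    using namespace Eigen;

    void refactorize_demo(const SparseMatrix<double>& A1,
                          const SparseMatrix<double>& A2,  // same pattern as A1
                          const VectorXd& b)
    {
      SimplicialLDLT<SparseMatrix<double> > solver;
      solver.analyzePattern(A1);   // symbolic step, done once per pattern
      solver.factorize(A1);        // numeric step
      VectorXd x1 = solver.solve(b);
      solver.factorize(A2);        // reuse the symbolic analysis
      VectorXd x2 = solver.solve(b);
    }
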
* @@ -995,37 +967,6 @@ void SuperILU::_solve_impl(const MatrixBase &b, MatrixBase -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef SuperLUBase<_MatrixType,Derived> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec().derived()._solve_impl(rhs(),dst); - } -}; - -template -struct sparse_solve_retval, Rhs> - : sparse_solve_retval_base, Rhs> -{ - typedef SuperLUBase<_MatrixType,Derived> Dec; - EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - this->defaultEvalTo(dst); - } -}; - -} // end namespace internal -#endif } // end namespace Eigen #endif // EIGEN_SUPERLUSUPPORT_H diff --git a/Eigen/src/UmfPackSupport/UmfPackSupport.h b/Eigen/src/UmfPackSupport/UmfPackSupport.h index 845c8076a..7fada5567 100644 --- a/Eigen/src/UmfPackSupport/UmfPackSupport.h +++ b/Eigen/src/UmfPackSupport/UmfPackSupport.h @@ -201,34 +201,6 @@ class UmfPackLU : public SparseSolverBase > analyzePattern(matrix); factorize(matrix); } - -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::solve_retval solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "UmfPackLU is not initialized."); - eigen_assert(rows()==b.rows() - && "UmfPackLU::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } - - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::sparse_solve_retval solve(const SparseMatrixBase& b) const - { - eigen_assert(m_isInitialized && "UmfPackLU is not initialized."); - eigen_assert(rows()==b.rows() - && "UmfPackLU::solve(): invalid number of rows of the right hand side matrix b"); - return internal::sparse_solve_retval(*this, b.derived()); - } -#endif /** Performs a symbolic decomposition on the sparcity of \a matrix. * @@ -401,37 +373,6 @@ bool UmfPackLU::_solve_impl(const MatrixBase &b, MatrixBas return true; } -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef UmfPackLU<_MatrixType> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -template -struct sparse_solve_retval, Rhs> - : sparse_solve_retval_base, Rhs> -{ - typedef UmfPackLU<_MatrixType> Dec; - EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - this->defaultEvalTo(dst); - } -}; - -} // end namespace internal -#endif } // end namespace Eigen #endif // EIGEN_UMFPACKSUPPORT_H diff --git a/Eigen/src/misc/Solve.h b/Eigen/src/misc/Solve.h deleted file mode 100644 index ebdd981d0..000000000 --- a/Eigen/src/misc/Solve.h +++ /dev/null @@ -1,78 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2009 Benoit Jacob -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
- -#ifndef EIGEN_MISC_SOLVE_H -#define EIGEN_MISC_SOLVE_H - -#ifndef EIGEN_TEST_EVALUATORS - -namespace Eigen { - -namespace internal { - -/** \class solve_retval_base - * - */ -template -struct traits > -{ - typedef typename DecompositionType::MatrixType MatrixType; - typedef Matrix ReturnType; -}; - -template struct solve_retval_base - : public ReturnByValue > -{ - typedef typename remove_all::type RhsNestedCleaned; - typedef _DecompositionType DecompositionType; - typedef ReturnByValue Base; - typedef typename Base::Index Index; - - solve_retval_base(const DecompositionType& dec, const Rhs& rhs) - : m_dec(dec), m_rhs(rhs) - {} - - inline Index rows() const { return m_dec.cols(); } - inline Index cols() const { return m_rhs.cols(); } - inline const DecompositionType& dec() const { return m_dec; } - inline const RhsNestedCleaned& rhs() const { return m_rhs; } - - template inline void evalTo(Dest& dst) const - { - static_cast*>(this)->evalTo(dst); - } - - protected: - const DecompositionType& m_dec; - typename Rhs::Nested m_rhs; -}; - -} // end namespace internal - -#define EIGEN_MAKE_SOLVE_HELPERS(DecompositionType,Rhs) \ - typedef typename DecompositionType::MatrixType MatrixType; \ - typedef typename MatrixType::Scalar Scalar; \ - typedef typename MatrixType::RealScalar RealScalar; \ - typedef typename MatrixType::Index Index; \ - typedef Eigen::internal::solve_retval_base Base; \ - using Base::dec; \ - using Base::rhs; \ - using Base::rows; \ - using Base::cols; \ - solve_retval(const DecompositionType& dec, const Rhs& rhs) \ - : Base(dec, rhs) {} - -} // end namespace Eigen -#endif // EIGEN_TEST_EVALUATORS -#endif // EIGEN_MISC_SOLVE_H diff --git a/Eigen/src/misc/SparseSolve.h b/Eigen/src/misc/SparseSolve.h deleted file mode 100644 index 2396dc8e8..000000000 --- a/Eigen/src/misc/SparseSolve.h +++ /dev/null @@ -1,134 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2010 Gael Guennebaud -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_SPARSE_SOLVE_H -#define EIGEN_SPARSE_SOLVE_H - -#ifndef EIGEN_TEST_EVALUATORS - -namespace Eigen { - -namespace internal { - -template struct sparse_solve_retval_base; -template struct sparse_solve_retval; - -template -struct traits > -{ - typedef typename DecompositionType::MatrixType MatrixType; - typedef SparseMatrix ReturnType; -}; - -template struct sparse_solve_retval_base - : public ReturnByValue > -{ - typedef typename remove_all::type RhsNestedCleaned; - typedef _DecompositionType DecompositionType; - typedef ReturnByValue Base; - typedef typename Base::Index Index; - - sparse_solve_retval_base(const DecompositionType& dec, const Rhs& rhs) - : m_dec(dec), m_rhs(rhs) - {} - - inline Index rows() const { return m_dec.cols(); } - inline Index cols() const { return m_rhs.cols(); } - inline const DecompositionType& dec() const { return m_dec; } - inline const RhsNestedCleaned& rhs() const { return m_rhs; } - - template inline void evalTo(Dest& dst) const - { - static_cast*>(this)->evalTo(dst); - } - - protected: - template - inline void defaultEvalTo(SparseMatrix& dst) const - { - // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix. 
- static const int NbColsAtOnce = 4; - int rhsCols = m_rhs.cols(); - int size = m_rhs.rows(); - // the temporary matrices do not need more columns than NbColsAtOnce: - int tmpCols = (std::min)(rhsCols, NbColsAtOnce); - Eigen::Matrix tmp(size,tmpCols); - Eigen::Matrix tmpX(size,tmpCols); - for(int k=0; k(rhsCols-k, NbColsAtOnce); - tmp.leftCols(actualCols) = m_rhs.middleCols(k,actualCols); - tmpX.leftCols(actualCols) = m_dec.solve(tmp.leftCols(actualCols)); - dst.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView(); - } - } - const DecompositionType& m_dec; - typename Rhs::Nested m_rhs; -}; - -#define EIGEN_MAKE_SPARSE_SOLVE_HELPERS(DecompositionType,Rhs) \ - typedef typename DecompositionType::MatrixType MatrixType; \ - typedef typename MatrixType::Scalar Scalar; \ - typedef typename MatrixType::RealScalar RealScalar; \ - typedef typename MatrixType::Index Index; \ - typedef Eigen::internal::sparse_solve_retval_base Base; \ - using Base::dec; \ - using Base::rhs; \ - using Base::rows; \ - using Base::cols; \ - sparse_solve_retval(const DecompositionType& dec, const Rhs& rhs) \ - : Base(dec, rhs) {} - - - -template struct solve_retval_with_guess; - -template -struct traits > -{ - typedef typename DecompositionType::MatrixType MatrixType; - typedef Matrix ReturnType; -}; - -template struct solve_retval_with_guess - : public ReturnByValue > -{ - typedef typename DecompositionType::Index Index; - - solve_retval_with_guess(const DecompositionType& dec, const Rhs& rhs, const Guess& guess) - : m_dec(dec), m_rhs(rhs), m_guess(guess) - {} - - inline Index rows() const { return m_dec.cols(); } - inline Index cols() const { return m_rhs.cols(); } - - template inline void evalTo(Dest& dst) const - { - dst = m_guess; - m_dec._solveWithGuess(m_rhs,dst); - } - - protected: - const DecompositionType& m_dec; - const typename Rhs::Nested m_rhs; - const typename Guess::Nested m_guess; -}; - -} // namepsace internal - -} // end namespace Eigen - -#endif // EIGEN_TEST_EVALUATORS - -#endif // EIGEN_SPARSE_SOLVE_H diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 075b8d3de..530e9e4e1 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -139,11 +139,6 @@ endif(TEST_LIB) set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Official") add_custom_target(BuildOfficial) -option(EIGEN_TEST_NO_EVALUATORS "Disable evaluators in unit tests" OFF) -if(EIGEN_TEST_NO_EVALUATORS) - add_definitions("-DEIGEN_TEST_NO_EVALUATORS=1") -endif(EIGEN_TEST_NO_EVALUATORS) - ei_add_test(meta) ei_add_test(sizeof) ei_add_test(dynalloc) diff --git a/test/evaluators.cpp b/test/evaluators.cpp index 2ca453b1c..f41968da8 100644 --- a/test/evaluators.cpp +++ b/test/evaluators.cpp @@ -1,16 +1,4 @@ -#ifndef EIGEN_ENABLE_EVALUATORS -#define EIGEN_ENABLE_EVALUATORS -#endif - -#ifdef EIGEN_TEST_EVALUATORS -#undef EIGEN_TEST_EVALUATORS -#endif - -#ifdef EIGEN_TEST_NO_EVALUATORS -#undef EIGEN_TEST_NO_EVALUATORS -#endif - #include "main.h" namespace Eigen { diff --git a/test/mixingtypes.cpp b/test/mixingtypes.cpp index 976e21e37..048f7255a 100644 --- a/test/mixingtypes.cpp +++ b/test/mixingtypes.cpp @@ -53,12 +53,10 @@ template void mixingtypes(int size = SizeAtCompileType) mf+mf; VERIFY_RAISES_ASSERT(mf+md); VERIFY_RAISES_ASSERT(mf+mcf); -#ifndef EIGEN_TEST_EVALUATORS - // they do not even compile when using evaluators - VERIFY_RAISES_ASSERT(vf=vd); - VERIFY_RAISES_ASSERT(vf+=vd); - VERIFY_RAISES_ASSERT(mcd=md); -#endif + // the following do not even compile since the introduction of evaluators +// 
VERIFY_RAISES_ASSERT(vf=vd); +// VERIFY_RAISES_ASSERT(vf+=vd); +// VERIFY_RAISES_ASSERT(mcd=md); // check scalar products VERIFY_IS_APPROX(vcf * sf , vcf * complex(sf)); diff --git a/test/nesting_ops.cpp b/test/nesting_ops.cpp index 114dd5e41..6e772c70f 100644 --- a/test/nesting_ops.cpp +++ b/test/nesting_ops.cpp @@ -11,12 +11,7 @@ template void run_nesting_ops(const MatrixType& _m) { -#ifndef EIGEN_TEST_EVALUATORS - // TODO, with evaluator, the following is not correct anymore: - typename MatrixType::Nested m(_m); -#else typename internal::nested_eval::type m(_m); -#endif // Make really sure that we are in debug mode! VERIFY_RAISES_ASSERT(eigen_assert(false)); diff --git a/test/sparse_vector.cpp b/test/sparse_vector.cpp index 6cd5a9a8c..5eea9edfd 100644 --- a/test/sparse_vector.cpp +++ b/test/sparse_vector.cpp @@ -71,10 +71,7 @@ template void sparse_vector(int rows, int cols) VERIFY_IS_APPROX(v1.dot(v2), refV1.dot(refV2)); VERIFY_IS_APPROX(v1.dot(refV2), refV1.dot(refV2)); -#ifdef EIGEN_TEST_EVALUATORS - // the following did not compiled without evaluators VERIFY_IS_APPROX(m1*v2, refM1*refV2); -#endif VERIFY_IS_APPROX(v1.dot(m1*v2), refV1.dot(refM1*refV2)); int i = internal::random(0,rows-1); VERIFY_IS_APPROX(v1.dot(m1.col(i)), refV1.dot(refM1.col(i))); diff --git a/test/vectorization_logic.cpp b/test/vectorization_logic.cpp index 303eb6cf0..2f839cf51 100644 --- a/test/vectorization_logic.cpp +++ b/test/vectorization_logic.cpp @@ -45,22 +45,14 @@ std::string demangle_flags(int f) template bool test_assign(const Dst&, const Src&, int traversal, int unrolling) { -#ifdef EIGEN_TEST_EVALUATORS typedef internal::copy_using_evaluator_traits,internal::evaluator, internal::assign_op > traits; -#else - typedef internal::assign_traits traits; -#endif bool res = traits::Traversal==traversal && traits::Unrolling==unrolling; if(!res) { std::cerr << "Src: " << demangle_flags(Src::Flags) << std::endl; -#ifdef EIGEN_TEST_EVALUATORS std::cerr << " " << demangle_flags(internal::evaluator::Flags) << std::endl; -#endif std::cerr << "Dst: " << demangle_flags(Dst::Flags) << std::endl; -#ifdef EIGEN_TEST_EVALUATORS std::cerr << " " << demangle_flags(internal::evaluator::Flags) << std::endl; -#endif traits::debug(); std::cerr << " Expected Traversal == " << demangle_traversal(traversal) << " got " << demangle_traversal(traits::Traversal) << "\n"; @@ -73,22 +65,14 @@ bool test_assign(const Dst&, const Src&, int traversal, int unrolling) template bool test_assign(int traversal, int unrolling) { -#ifdef EIGEN_TEST_EVALUATORS typedef internal::copy_using_evaluator_traits,internal::evaluator, internal::assign_op > traits; -#else - typedef internal::assign_traits traits; -#endif bool res = traits::Traversal==traversal && traits::Unrolling==unrolling; if(!res) { std::cerr << "Src: " << demangle_flags(Src::Flags) << std::endl; -#ifdef EIGEN_TEST_EVALUATORS std::cerr << " " << demangle_flags(internal::evaluator::Flags) << std::endl; -#endif std::cerr << "Dst: " << demangle_flags(Dst::Flags) << std::endl; -#ifdef EIGEN_TEST_EVALUATORS std::cerr << " " << demangle_flags(internal::evaluator::Flags) << std::endl; -#endif traits::debug(); std::cerr << " Expected Traversal == " << demangle_traversal(traversal) << " got " << demangle_traversal(traits::Traversal) << "\n"; @@ -101,19 +85,13 @@ bool test_assign(int traversal, int unrolling) template bool test_redux(const Xpr&, int traversal, int unrolling) { -#ifdef EIGEN_TEST_EVALUATORS typedef internal::redux_traits,internal::redux_evaluator > traits; -#else - typedef 
internal::redux_traits,Xpr> traits; -#endif bool res = traits::Traversal==traversal && traits::Unrolling==unrolling; if(!res) { std::cerr << demangle_flags(Xpr::Flags) << std::endl; -#ifdef EIGEN_TEST_EVALUATORS std::cerr << demangle_flags(internal::evaluator::Flags) << std::endl; -#endif traits::debug(); std::cerr << " Expected Traversal == " << demangle_traversal(traversal) diff --git a/unsupported/Eigen/IterativeSolvers b/unsupported/Eigen/IterativeSolvers index aa15403db..ff0d59b6e 100644 --- a/unsupported/Eigen/IterativeSolvers +++ b/unsupported/Eigen/IterativeSolvers @@ -24,9 +24,6 @@ */ //@{ -#include "../../Eigen/src/misc/Solve.h" -#include "../../Eigen/src/misc/SparseSolve.h" - #ifndef EIGEN_MPL2_ONLY #include "src/IterativeSolvers/IterationController.h" #include "src/IterativeSolvers/ConstrainedConjGrad.h" diff --git a/unsupported/Eigen/SparseExtra b/unsupported/Eigen/SparseExtra index b5597902a..819cffa27 100644 --- a/unsupported/Eigen/SparseExtra +++ b/unsupported/Eigen/SparseExtra @@ -37,9 +37,6 @@ */ -#include "../../Eigen/src/misc/Solve.h" -#include "../../Eigen/src/misc/SparseSolve.h" - #include "src/SparseExtra/DynamicSparseMatrix.h" #include "src/SparseExtra/BlockOfDynamicSparseMatrix.h" #include "src/SparseExtra/RandomSetter.h" diff --git a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h index fe0bfd948..0e1b7d977 100644 --- a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h @@ -139,24 +139,6 @@ class DGMRES : public IterativeSolverBase > ~DGMRES() {} -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A - * \a x0 as an initial solution. - * - * \sa compute() - */ - template - inline const internal::solve_retval_with_guess - solveWithGuess(const MatrixBase& b, const Guess& x0) const - { - eigen_assert(m_isInitialized && "DGMRES is not initialized."); - eigen_assert(Base::rows()==b.rows() - && "DGMRES::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval_with_guess - (*this, b.derived(), x0); - } -#endif - /** \internal */ template void _solve_with_guess_impl(const Rhs& b, Dest& x) const @@ -525,23 +507,5 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x, return 0; } -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - - template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef DGMRES<_MatrixType, _Preconditioner> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; -} // end namespace internal -#endif - } // end namespace Eigen #endif diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h index fd76a9d2c..cd15ce0bf 100644 --- a/unsupported/Eigen/src/IterativeSolvers/GMRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h @@ -316,24 +316,6 @@ public: */ void set_restart(const int restart) { m_restart=restart; } -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A - * \a x0 as an initial solution. 
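For illustration, a hypothetical warm-started solve through this interface; A, b and the initial guess x0 are placeholders:

    #include <Eigen/SparseCore>
    #include <unsupported/Eigen/IterativeSolvers>
    using namespace Eigen;

    void gmres_demo(const SparseMatrix<double>& A, const VectorXd& b, const VectorXd& x0)
    {
      GMRES<SparseMatrix<double> > gmres(A);      // default diagonal (Jacobi) preconditioner
      gmres.setTolerance(1e-8);
      VectorXd x = gmres.solveWithGuess(b, x0);   // warm start from x0
      // gmres.iterations() and gmres.error() report the convergence status
    }
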
- * - * \sa compute() - */ - template - inline const internal::solve_retval_with_guess - solveWithGuess(const MatrixBase& b, const Guess& x0) const - { - eigen_assert(m_isInitialized && "GMRES is not initialized."); - eigen_assert(Base::rows()==b.rows() - && "GMRES::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval_with_guess - (*this, b.derived(), x0); - } -#endif - /** \internal */ template void _solve_with_guess_impl(const Rhs& b, Dest& x) const @@ -367,24 +349,6 @@ protected: }; -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - - template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef GMRES<_MatrixType, _Preconditioner> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -} // end namespace internal -#endif } // end namespace Eigen #endif // EIGEN_GMRES_H diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h index 1ee1c89b2..dd43de6b3 100644 --- a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h +++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h @@ -106,18 +106,6 @@ class IncompleteCholesky : public SparseSolverBase inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_factorizationIsOk && "IncompleteLLT did not succeed"); - eigen_assert(m_isInitialized && "IncompleteLLT is not initialized."); - eigen_assert(cols()==b.rows() - && "IncompleteLLT::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } -#endif protected: SparseMatrix m_L; // The lower part stored in CSC @@ -263,25 +251,6 @@ inline void IncompleteCholesky::updateList(const Idx } } -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef IncompleteCholesky<_Scalar, _UpLo, OrderingType> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve(rhs(),dst); - } -}; - -} // end namespace internal -#endif - } // end namespace Eigen #endif diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h index e86f65644..7d08c3515 100644 --- a/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h +++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h @@ -81,40 +81,10 @@ class IncompleteLU : public SparseSolverBase > x = m_lu.template triangularView().solve(x); } -#ifndef EIGEN_TEST_EVALUATORS - template inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "IncompleteLU is not initialized."); - eigen_assert(cols()==b.rows() - && "IncompleteLU::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } -#endif - protected: FactorType m_lu; }; -#ifndef EIGEN_TEST_EVALUATORS -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef IncompleteLU<_MatrixType> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } -}; - -} // end namespace internal -#endif - } // end namespace Eigen #endif // EIGEN_INCOMPLETE_LU_H diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h index 28d5c692d..aaf42c78a 100644 --- 
a/unsupported/Eigen/src/IterativeSolvers/MINRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h @@ -246,24 +246,6 @@ namespace Eigen { /** Destructor. */ ~MINRES(){} -#ifndef EIGEN_TEST_EVALUATORS - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A - * \a x0 as an initial solution. - * - * \sa compute() - */ - template - inline const internal::solve_retval_with_guess - solveWithGuess(const MatrixBase& b, const Guess& x0) const - { - eigen_assert(m_isInitialized && "MINRES is not initialized."); - eigen_assert(Base::rows()==b.rows() - && "MINRES::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval_with_guess - (*this, b.derived(), x0); - } -#endif - /** \internal */ template void _solve_with_guess_impl(const Rhs& b, Dest& x) const @@ -296,25 +278,6 @@ namespace Eigen { protected: }; - -#ifndef EIGEN_TEST_EVALUATORS - namespace internal { - - template - struct solve_retval, Rhs> - : solve_retval_base, Rhs> - { - typedef MINRES<_MatrixType,_UpLo,_Preconditioner> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve_impl(rhs(),dst); - } - }; - - } // end namespace internal -#endif } // end namespace Eigen diff --git a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h index 4210df68a..e4dc1c1de 100644 --- a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h +++ b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h @@ -352,7 +352,6 @@ class DynamicSparseMatrix::ReverseInnerIterator : public const Index m_outer; }; -#ifdef EIGEN_ENABLE_EVALUATORS namespace internal { template @@ -382,7 +381,6 @@ struct evaluator > }; } -#endif } // end namespace Eigen diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt index 970a05bbd..e45efbd39 100644 --- a/unsupported/test/CMakeLists.txt +++ b/unsupported/test/CMakeLists.txt @@ -6,10 +6,6 @@ include_directories(../../test ../../unsupported ../../Eigen ${CMAKE_CURRENT_BINARY_DIR}/../../test) -if(EIGEN_TEST_NO_EVALUATORS) - add_definitions("-DEIGEN_TEST_NO_EVALUATORS=1") -endif(EIGEN_TEST_NO_EVALUATORS) - find_package(GoogleHash) if(GOOGLEHASH_FOUND) add_definitions("-DEIGEN_GOOGLEHASH_SUPPORT") @@ -33,22 +29,18 @@ endif(ADOLC_FOUND) ei_add_test(NonLinearOptimization) ei_add_test(NumericalDiff) -if(EIGEN_TEST_NO_EVALUATORS) -ei_add_test(autodiff_scalar) -ei_add_test(autodiff) -endif() +# TODO ei_add_test(autodiff_scalar) +# TODO ei_add_test(autodiff) if (NOT CMAKE_CXX_COMPILER MATCHES "clang\\+\\+$") ei_add_test(BVH) endif() -if(EIGEN_TEST_NO_EVALUATORS) -ei_add_test(matrix_exponential) -ei_add_test(matrix_function) -ei_add_test(matrix_power) -ei_add_test(matrix_square_root) -ei_add_test(alignedvector3) -endif() +# TODO ei_add_test(matrix_exponential) +# TODO ei_add_test(matrix_function) +# TODO ei_add_test(matrix_power) +# TODO ei_add_test(matrix_square_root) +# TODO ei_add_test(alignedvector3) ei_add_test(FFT) @@ -101,9 +93,7 @@ ei_add_test(gmres) ei_add_test(minres) ei_add_test(levenberg_marquardt) ei_add_test(bdcsvd) -if(EIGEN_TEST_NO_EVALUATORS) -ei_add_test(kronecker_product) -endif() +# TODO ei_add_test(kronecker_product) option(EIGEN_TEST_CXX11 "Enable testing of C++11 features (e.g. Tensor module)." 
OFF) if(EIGEN_TEST_CXX11) -- cgit v1.2.3 From 060e835ee9f7a3abb9ff3d9f2522027bf2f98efe Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Thu, 18 Sep 2014 17:30:21 +0200 Subject: Add evaluator for the experimental AlignedVector3 --- unsupported/Eigen/AlignedVector3 | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3 index 7b45e6cce..cd5f49edb 100644 --- a/unsupported/Eigen/AlignedVector3 +++ b/unsupported/Eigen/AlignedVector3 @@ -57,6 +57,9 @@ template class AlignedVector3 inline Index rows() const { return 3; } inline Index cols() const { return 1; } + + Scalar* data() { return m_coeffs.data(); } + const Scalar* data() const { return m_coeffs.data(); } inline const Scalar& coeff(Index row, Index col) const { return m_coeffs.coeff(row, col); } @@ -183,6 +186,29 @@ template class AlignedVector3 } }; +namespace internal { + +template +struct evaluator > + : evaluator,Aligned> >::type +{ + typedef AlignedVector3 XprType; + typedef Map,Aligned> MapType; + typedef typename evaluator::type Base; + + typedef evaluator type; + typedef evaluator nestedType; + + evaluator(const XprType &m) : Base(MapType(m.data())), m_map(m.data()) + { + ::new (static_cast(this)) Base(m_map); + } + + const MapType m_map; +}; + +} + //@} } -- cgit v1.2.3 From 62bce6e5e6da71dd8d85ae229d24b9f9f13d1681 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Thu, 18 Sep 2014 17:31:17 +0200 Subject: Make MatrixFunction use nested_eval instead of nested --- unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h | 5 +++-- unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h | 11 ++++++----- unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h | 11 ++++++----- unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h | 8 +++++--- unsupported/test/CMakeLists.txt | 10 +++++----- 5 files changed, 25 insertions(+), 20 deletions(-) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h index 160120d03..9e0545660 100644 --- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h +++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h @@ -392,14 +392,15 @@ template struct MatrixExponentialReturnValue template inline void evalTo(ResultType& result) const { - internal::matrix_exp_compute(m_src, result); + const typename internal::nested_eval::type tmp(m_src); + internal::matrix_exp_compute(tmp, result); } Index rows() const { return m_src.rows(); } Index cols() const { return m_src.cols(); } protected: - const typename internal::nested::type m_src; + const typename internal::nested::type m_src; }; namespace internal { diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h index a35c11be5..b68aae5e8 100644 --- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h +++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h @@ -485,7 +485,7 @@ template class MatrixFunctionReturnValue typedef typename internal::stem_function::type StemFunction; protected: - typedef typename internal::nested::type DerivedNested; + typedef typename internal::nested::type DerivedNested; public: @@ -503,18 +503,19 @@ template class MatrixFunctionReturnValue template inline void evalTo(ResultType& result) const { - typedef typename internal::remove_all::type DerivedNestedClean; - typedef internal::traits Traits; + typedef typename 
internal::nested_eval::type NestedEvalType; + typedef typename internal::remove_all::type NestedEvalTypeClean; + typedef internal::traits Traits; static const int RowsAtCompileTime = Traits::RowsAtCompileTime; static const int ColsAtCompileTime = Traits::ColsAtCompileTime; - static const int Options = DerivedNestedClean::Options; + static const int Options = NestedEvalTypeClean::Options; typedef std::complex::Real> ComplexScalar; typedef Matrix DynMatrixType; typedef internal::MatrixFunctionAtomic AtomicType; AtomicType atomic(m_f); - internal::matrix_function_compute::run(m_A, atomic, result); + internal::matrix_function_compute::run(m_A, atomic, result); } Index rows() const { return m_A.rows(); } diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h index d46ccc145..42b60b9b1 100644 --- a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h +++ b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h @@ -310,7 +310,7 @@ public: typedef typename Derived::Index Index; protected: - typedef typename internal::nested::type DerivedNested; + typedef typename internal::nested::type DerivedNested; public: @@ -327,17 +327,18 @@ public: template inline void evalTo(ResultType& result) const { - typedef typename internal::remove_all::type DerivedNestedClean; - typedef internal::traits Traits; + typedef typename internal::nested_eval::type DerivedEvalType; + typedef typename internal::remove_all::type DerivedEvalTypeClean; + typedef internal::traits Traits; static const int RowsAtCompileTime = Traits::RowsAtCompileTime; static const int ColsAtCompileTime = Traits::ColsAtCompileTime; - static const int Options = DerivedNestedClean::Options; + static const int Options = DerivedEvalTypeClean::Options; typedef std::complex::Real> ComplexScalar; typedef Matrix DynMatrixType; typedef internal::MatrixLogarithmAtomic AtomicType; AtomicType atomic; - internal::matrix_function_compute::run(m_A, atomic, result); + internal::matrix_function_compute::run(m_A, atomic, result); } Index rows() const { return m_A.rows(); } diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h index 8ca4f4864..3a4d6eb3f 100644 --- a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h +++ b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h @@ -320,7 +320,7 @@ template class MatrixSquareRootReturnValue { protected: typedef typename Derived::Index Index; - typedef typename internal::nested::type DerivedNested; + typedef typename internal::nested::type DerivedNested; public: /** \brief Constructor. 
@@ -338,8 +338,10 @@ template class MatrixSquareRootReturnValue template inline void evalTo(ResultType& result) const { - typedef typename internal::remove_all::type DerivedNestedClean; - internal::matrix_sqrt_compute::run(m_src, result); + typedef typename internal::nested_eval::type DerivedEvalType; + typedef typename internal::remove_all::type DerivedEvalTypeClean; + DerivedEvalType tmp(m_src); + internal::matrix_sqrt_compute::run(tmp, result); } Index rows() const { return m_src.rows(); } diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt index e45efbd39..7e2186d2d 100644 --- a/unsupported/test/CMakeLists.txt +++ b/unsupported/test/CMakeLists.txt @@ -36,11 +36,11 @@ if (NOT CMAKE_CXX_COMPILER MATCHES "clang\\+\\+$") ei_add_test(BVH) endif() -# TODO ei_add_test(matrix_exponential) -# TODO ei_add_test(matrix_function) -# TODO ei_add_test(matrix_power) -# TODO ei_add_test(matrix_square_root) -# TODO ei_add_test(alignedvector3) +ei_add_test(matrix_exponential) +ei_add_test(matrix_function) +ei_add_test(matrix_power) +ei_add_test(matrix_square_root) +ei_add_test(alignedvector3) ei_add_test(FFT) -- cgit v1.2.3 From 2ae20d558b33653b8ef2fe17255ed171997bcf79 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Thu, 18 Sep 2014 22:08:49 +0200 Subject: Update KroneckerProduct wrt evaluator changes --- .../src/KroneckerProduct/KroneckerTensorProduct.h | 36 ++++++++++++++++++---- unsupported/test/CMakeLists.txt | 2 +- 2 files changed, 31 insertions(+), 7 deletions(-) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h index ca66d4d89..72e25db19 100644 --- a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h +++ b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h @@ -154,16 +154,41 @@ void KroneckerProductSparse::evalTo(Dest& dst) const dst.resize(this->rows(), this->cols()); dst.resizeNonZeros(0); + // 1 - evaluate the operands if needed: + typedef typename internal::nested_eval::type Lhs1; + typedef typename internal::remove_all::type Lhs1Cleaned; + const Lhs1 lhs1(m_A); + typedef typename internal::nested_eval::type Rhs1; + typedef typename internal::remove_all::type Rhs1Cleaned; + const Rhs1 rhs1(m_B); + + // 2 - construct a SparseView for dense operands + typedef typename internal::conditional::StorageKind,Sparse>::value, Lhs1, SparseView >::type Lhs2; + typedef typename internal::remove_all::type Lhs2Cleaned; + const Lhs2 lhs2(lhs1); + typedef typename internal::conditional::StorageKind,Sparse>::value, Rhs1, SparseView >::type Rhs2; + typedef typename internal::remove_all::type Rhs2Cleaned; + const Rhs2 rhs2(rhs1); + + // 3 - construct respective evaluators + typedef typename internal::evaluator::type LhsEval; + LhsEval lhsEval(lhs2); + typedef typename internal::evaluator::type RhsEval; + RhsEval rhsEval(rhs2); + + typedef typename LhsEval::InnerIterator LhsInnerIterator; + typedef typename RhsEval::InnerIterator RhsInnerIterator; + // compute number of non-zeros per innervectors of dst { VectorXi nnzA = VectorXi::Zero(Dest::IsRowMajor ? m_A.rows() : m_A.cols()); for (Index kA=0; kA < m_A.outerSize(); ++kA) - for (typename Lhs::InnerIterator itA(m_A,kA); itA; ++itA) + for (LhsInnerIterator itA(lhsEval,kA); itA; ++itA) nnzA(Dest::IsRowMajor ? itA.row() : itA.col())++; VectorXi nnzB = VectorXi::Zero(Dest::IsRowMajor ? 
m_B.rows() : m_B.cols()); for (Index kB=0; kB < m_B.outerSize(); ++kB) - for (typename Rhs::InnerIterator itB(m_B,kB); itB; ++itB) + for (RhsInnerIterator itB(rhsEval,kB); itB; ++itB) nnzB(Dest::IsRowMajor ? itB.row() : itB.col())++; Matrix nnzAB = nnzB * nnzA.transpose(); @@ -174,9 +199,9 @@ void KroneckerProductSparse::evalTo(Dest& dst) const { for (Index kB=0; kB < m_B.outerSize(); ++kB) { - for (typename Lhs::InnerIterator itA(m_A,kA); itA; ++itA) + for (LhsInnerIterator itA(lhsEval,kA); itA; ++itA) { - for (typename Rhs::InnerIterator itB(m_B,kB); itB; ++itB) + for (RhsInnerIterator itB(rhsEval,kB); itB; ++itB) { const Index i = itA.row() * Br + itB.row(), j = itA.col() * Bc + itB.col(); @@ -201,8 +226,7 @@ struct traits > Rows = size_at_compile_time::RowsAtCompileTime, traits::RowsAtCompileTime>::ret, Cols = size_at_compile_time::ColsAtCompileTime, traits::ColsAtCompileTime>::ret, MaxRows = size_at_compile_time::MaxRowsAtCompileTime, traits::MaxRowsAtCompileTime>::ret, - MaxCols = size_at_compile_time::MaxColsAtCompileTime, traits::MaxColsAtCompileTime>::ret, - CoeffReadCost = Lhs::CoeffReadCost + Rhs::CoeffReadCost + NumTraits::MulCost + MaxCols = size_at_compile_time::MaxColsAtCompileTime, traits::MaxColsAtCompileTime>::ret }; typedef Matrix ReturnType; diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt index 7e2186d2d..04863a9ad 100644 --- a/unsupported/test/CMakeLists.txt +++ b/unsupported/test/CMakeLists.txt @@ -93,7 +93,7 @@ ei_add_test(gmres) ei_add_test(minres) ei_add_test(levenberg_marquardt) ei_add_test(bdcsvd) -# TODO ei_add_test(kronecker_product) +ei_add_test(kronecker_product) option(EIGEN_TEST_CXX11 "Enable testing of C++11 features (e.g. Tensor module)." OFF) if(EIGEN_TEST_CXX11) -- cgit v1.2.3 From e70506dd8f63241d92ebc3df2d9c01c9af5564c8 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Thu, 18 Sep 2014 22:46:46 +0200 Subject: Fix inner-stride of AlignedVector3 --- unsupported/Eigen/AlignedVector3 | 16 +++++++--------- unsupported/test/CMakeLists.txt | 4 ++-- 2 files changed, 9 insertions(+), 11 deletions(-) (limited to 'unsupported/Eigen') diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3 index cd5f49edb..35493e87b 100644 --- a/unsupported/Eigen/AlignedVector3 +++ b/unsupported/Eigen/AlignedVector3 @@ -60,6 +60,7 @@ template class AlignedVector3 Scalar* data() { return m_coeffs.data(); } const Scalar* data() const { return m_coeffs.data(); } + Index innerStride() const { return 1; } inline const Scalar& coeff(Index row, Index col) const { return m_coeffs.coeff(row, col); } @@ -184,27 +185,24 @@ template class AlignedVector3 { return m_coeffs.template head<3>().isApprox(other,eps); } + + CoeffType& coeffs() { return m_coeffs; } + const CoeffType& coeffs() const { return m_coeffs; } }; namespace internal { template struct evaluator > - : evaluator,Aligned> >::type + : evaluator >::type { typedef AlignedVector3 XprType; - typedef Map,Aligned> MapType; - typedef typename evaluator::type Base; + typedef typename evaluator >::type Base; typedef evaluator type; typedef evaluator nestedType; - evaluator(const XprType &m) : Base(MapType(m.data())), m_map(m.data()) - { - ::new (static_cast(this)) Base(m_map); - } - - const MapType m_map; + evaluator(const XprType &m) : Base(m.coeffs()) {} }; } diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt index 04863a9ad..48b61cde0 100644 --- a/unsupported/test/CMakeLists.txt +++ b/unsupported/test/CMakeLists.txt @@ -29,8 +29,8 @@ 
endif(ADOLC_FOUND) ei_add_test(NonLinearOptimization) ei_add_test(NumericalDiff) -# TODO ei_add_test(autodiff_scalar) -# TODO ei_add_test(autodiff) +ei_add_test(autodiff_scalar) +ei_add_test(autodiff) if (NOT CMAKE_CXX_COMPILER MATCHES "clang\\+\\+$") ei_add_test(BVH) -- cgit v1.2.3
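
As a sanity check on the AlignedVector3 changes above, here is a minimal usage sketch. It is not part of the patch series; it only assumes the public interface visible in the diffs (the three-scalar constructor, dot(), sum(), the coeffs() accessor added in the last patch, and the evaluator specialization), so treat it as illustrative rather than as the module's reference documentation.

// Minimal sketch, assuming the unsupported AlignedVector3 API as of this series.
// Arithmetic stays on the padded 4-component storage; streaming and generic
// coefficient access go through the evaluator<AlignedVector3<Scalar> >
// specialization added above, which now forwards to the evaluator of coeffs().
#include <iostream>
#include <unsupported/Eigen/AlignedVector3>

int main()
{
  Eigen::AlignedVector3<float> u(1.f, 2.f, 3.f);
  Eigen::AlignedVector3<float> v(4.f, 5.f, 6.f);

  Eigen::AlignedVector3<float> w = u + v;              // vectorized 4-wide add, 4th coefficient stays 0

  std::cout << "w      =\n" << w << "\n";              // prints the 3 logical coefficients: 5 7 9
  std::cout << "u.v    = " << u.dot(v) << "\n";        // 32
  std::cout << "sum(w) = " << w.sum() << "\n";         // 21
  std::cout << "padded = " << w.coeffs().transpose() << "\n";  // underlying storage: 5 7 9 0
  return 0;
}

(Reading the last two patches together, basing the evaluator on coeffs() rather than on a Map of the raw data pointer appears to be what keeps the reported inner stride consistent with the new innerStride() member; that interpretation is inferred from the diffs, not from a changelog.)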