From ff46ec0f240ef84e2293b33b265c703e9b765c2e Mon Sep 17 00:00:00 2001
From: Gael Guennebaud
Date: Mon, 22 Sep 2014 23:33:28 +0200
Subject: bug #881: make SparseMatrixBase::isApprox(SparseMatrixBase) exploits
 sparse computations instead of converting the operands to dense matrices.

---
 Eigen/src/SparseCore/SparseFuzzy.h | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)

(limited to 'Eigen/src/SparseCore/SparseFuzzy.h')

diff --git a/Eigen/src/SparseCore/SparseFuzzy.h b/Eigen/src/SparseCore/SparseFuzzy.h
index 45f36e9eb..3e67cbf5f 100644
--- a/Eigen/src/SparseCore/SparseFuzzy.h
+++ b/Eigen/src/SparseCore/SparseFuzzy.h
@@ -1,7 +1,7 @@
 // This file is part of Eigen, a lightweight C++ template library
 // for linear algebra.
 //
-// Copyright (C) 2008 Gael Guennebaud
+// Copyright (C) 2008-2014 Gael Guennebaud
 //
 // This Source Code Form is subject to the terms of the Mozilla
 // Public License v. 2.0. If a copy of the MPL was not distributed
@@ -10,17 +10,21 @@
 #ifndef EIGEN_SPARSE_FUZZY_H
 #define EIGEN_SPARSE_FUZZY_H
 
-// template<typename Derived>
-// template<typename OtherDerived>
-// bool SparseMatrixBase<Derived>::isApprox(
-//   const OtherDerived& other,
-//   typename NumTraits<Scalar>::Real prec
-// ) const
-// {
-//   const typename internal::nested<Derived,2>::type nested(derived());
-//   const typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
-//   return    (nested - otherNested).cwise().abs2().sum()
-//          <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
-// }
+namespace Eigen {
+
+template<typename Derived>
+template<typename OtherDerived>
+bool SparseMatrixBase<Derived>::isApprox(const SparseMatrixBase<OtherDerived>& other, const RealScalar &prec) const
+{
+  using std::min;
+  const typename internal::nested_eval<Derived,2,PlainObject>::type actualA(derived());
+  typename internal::conditional<IsRowMajor==OtherDerived::IsRowMajor,
+    const typename internal::nested_eval<OtherDerived,2,PlainObject>::type,
+    const PlainObject>::type actualB(other.derived());
+
+  return (actualA - actualB).squaredNorm() <= prec * prec * (min)(actualA.squaredNorm(), actualB.squaredNorm());
+}
+
+} // end namespace Eigen
 
 #endif // EIGEN_SPARSE_FUZZY_H
--
cgit v1.2.3


From de0d8a010e8cee66901786e0e2819beeaa5cb253 Mon Sep 17 00:00:00 2001
From: Christoph Hertzberg
Date: Tue, 23 Sep 2014 12:58:14 +0200
Subject: Suppress stupid gcc-4.4 warning

---
 Eigen/src/SparseCore/SparseFuzzy.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'Eigen/src/SparseCore/SparseFuzzy.h')

diff --git a/Eigen/src/SparseCore/SparseFuzzy.h b/Eigen/src/SparseCore/SparseFuzzy.h
index 3e67cbf5f..a76c1a5e0 100644
--- a/Eigen/src/SparseCore/SparseFuzzy.h
+++ b/Eigen/src/SparseCore/SparseFuzzy.h
@@ -18,7 +18,7 @@ bool SparseMatrixBase<Derived>::isApprox(const SparseMatrixBase<OtherDerived>& o
 {
   using std::min;
   const typename internal::nested_eval<Derived,2,PlainObject>::type actualA(derived());
-  typename internal::conditional<IsRowMajor==OtherDerived::IsRowMajor,
+  typename internal::conditional<bool(IsRowMajor)==bool(OtherDerived::IsRowMajor),
     const typename internal::nested_eval<OtherDerived,2,PlainObject>::type,
     const PlainObject>::type actualB(other.derived());
--
cgit v1.2.3


From fe57b2f963da832d14f4d7b6d4a9554ceef26e36 Mon Sep 17 00:00:00 2001
From: Gael Guennebaud
Date: Mon, 20 Oct 2014 15:55:32 +0200
Subject: bug #701: workaround (min) and (max) blocking ADL by introducing
 numext::mini and numext::maxi internal functions and a EIGEN_NOT_A_MACRO
 macro.
---
 Eigen/src/Cholesky/LDLT.h                           |  3 +--
 Eigen/src/Core/Diagonal.h                           |  5 ++---
 Eigen/src/Core/Fuzzy.h                              |  3 +--
 Eigen/src/Core/GenericPacketMath.h                  |  4 ++--
 Eigen/src/Core/MathFunctions.h                      | 16 ++++++++++++++++
 Eigen/src/Core/StableNorm.h                         | 10 +++-------
 Eigen/src/Core/functors/BinaryFunctors.h            |  6 ++----
 Eigen/src/Core/util/Macros.h                        |  5 +++++
 Eigen/src/Eigenvalues/EigenSolver.h                 |  6 ++----
 Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h      |  3 +--
 Eigen/src/Geometry/Quaternion.h                     |  3 +--
 Eigen/src/SVD/JacobiSVD.h                           |  3 +--
 Eigen/src/SparseCore/SparseFuzzy.h                  |  3 +--
 Eigen/src/SparseQR/SparseQR.h                       |  3 +--
 unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h     |  1 -
 unsupported/Eigen/src/BDCSVD/BDCSVD.h               |  8 +++-----
 .../Eigen/src/IterativeSolvers/IncompleteCholesky.h |  3 +--
 17 files changed, 43 insertions(+), 42 deletions(-)

(limited to 'Eigen/src/SparseCore/SparseFuzzy.h')

diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
index dfc473df1..5acbf4651 100644
--- a/Eigen/src/Cholesky/LDLT.h
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -488,11 +488,10 @@ void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) cons
   // dst = D^-1 (L^-1 P b)
   // more precisely, use pseudo-inverse of D (see bug 241)
   using std::abs;
-  EIGEN_USING_STD_MATH(max);
   const typename Diagonal<const MatrixType>::RealReturnType vecD(vectorD());
   // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon
   // as motivated by LAPACK's xGELSS:
-  // RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() *NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
+  // RealScalar tolerance = numext::maxi(vectorD.array().abs().maxCoeff() *NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
   // However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest
   // diagonal element is not well justified and to numerical issues in some cases.
   // Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
diff --git a/Eigen/src/Core/Diagonal.h b/Eigen/src/Core/Diagonal.h
index 6ffc0c762..26a58d664 100644
--- a/Eigen/src/Core/Diagonal.h
+++ b/Eigen/src/Core/Diagonal.h
@@ -77,9 +77,8 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
     EIGEN_DEVICE_FUNC
     inline Index rows() const
     {
-      EIGEN_USING_STD_MATH(min);
-      return m_index.value()<0 ? (min)(Index(m_matrix.cols()),Index(m_matrix.rows()+m_index.value()))
-                               : (min)(Index(m_matrix.rows()),Index(m_matrix.cols()-m_index.value()));
+      return m_index.value()<0 ? numext::mini(Index(m_matrix.cols()),Index(m_matrix.rows()+m_index.value()))
+                               : numext::mini(Index(m_matrix.rows()),Index(m_matrix.cols()-m_index.value()));
     }
 
     EIGEN_DEVICE_FUNC
diff --git a/Eigen/src/Core/Fuzzy.h b/Eigen/src/Core/Fuzzy.h
index 8cd069a0d..3e403a09d 100644
--- a/Eigen/src/Core/Fuzzy.h
+++ b/Eigen/src/Core/Fuzzy.h
@@ -22,10 +22,9 @@ struct isApprox_selector
   EIGEN_DEVICE_FUNC
   static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)
   {
-    EIGEN_USING_STD_MATH(min);
    typename internal::nested_eval<Derived,2>::type nested(x);
    typename internal::nested_eval<OtherDerived,2>::type otherNested(y);
-    return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
+    return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * numext::mini(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
   }
 };
 
diff --git a/Eigen/src/Core/GenericPacketMath.h b/Eigen/src/Core/GenericPacketMath.h
index a1fcb82fb..065ccf7ac 100644
--- a/Eigen/src/Core/GenericPacketMath.h
+++ b/Eigen/src/Core/GenericPacketMath.h
@@ -126,12 +126,12 @@ pdiv(const Packet& a,
 /** \internal \returns the min of \a a and \a b (coeff-wise) */
 template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
 pmin(const Packet& a,
-        const Packet& b) { EIGEN_USING_STD_MATH(min); return (min)(a, b); }
+        const Packet& b) { return numext::mini(a, b); }
 
 /** \internal \returns the max of \a a and \a b (coeff-wise) */
 template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
 pmax(const Packet& a,
-        const Packet& b) { EIGEN_USING_STD_MATH(max); return (max)(a, b); }
+        const Packet& b) { return numext::maxi(a, b); }
 
 /** \internal \returns the absolute value of \a a */
 template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h
index 73859b0ee..071c234c2 100644
--- a/Eigen/src/Core/MathFunctions.h
+++ b/Eigen/src/Core/MathFunctions.h
@@ -591,6 +591,22 @@ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
 ****************************************************************************/
 
 namespace numext {
+
+template<typename T>
+EIGEN_DEVICE_FUNC
+inline T mini(const T& x, const T& y)
+{
+  using std::min;
+  return min EIGEN_NOT_A_MACRO (x,y);
+}
+
+template<typename T>
+EIGEN_DEVICE_FUNC
+inline T maxi(const T& x, const T& y)
+{
+  using std::max;
+  return max EIGEN_NOT_A_MACRO (x,y);
+}
 
 template<typename T>
 EIGEN_DEVICE_FUNC
diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h
index 64d43e1b1..0b7e39827 100644
--- a/Eigen/src/Core/StableNorm.h
+++ b/Eigen/src/Core/StableNorm.h
@@ -17,7 +17,6 @@ namespace internal {
 template<typename ExpressionType, typename Scalar>
 inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
 {
-  using std::max;
   Scalar maxCoeff = bl.cwiseAbs().maxCoeff();
 
   if(maxCoeff>scale)
@@ -58,8 +57,6 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
   typedef typename Derived::RealScalar RealScalar;
   typedef typename Derived::Index Index;
   using std::pow;
-  EIGEN_USING_STD_MATH(min);
-  EIGEN_USING_STD_MATH(max);
   using std::sqrt;
   using std::abs;
   const Derived& vec(_vec.derived());
@@ -136,8 +133,8 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
   }
   else
     return sqrt(amed);
-  asml = (min)(abig, amed);
-  abig = (max)(abig, amed);
+  asml = numext::mini(abig, amed);
+  abig = numext::maxi(abig, amed);
   if(asml <= abig*relerr)
     return abig;
   else
@@ -160,7 +157,6 @@ template<typename Derived>
 inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
 MatrixBase<Derived>::stableNorm() const
 {
-  EIGEN_USING_STD_MATH(min);
   using std::sqrt;
   const Index blockSize = 4096;
   RealScalar scale(0);
@@ -174,7 +170,7 @@ MatrixBase<Derived>::stableNorm() const
   if (bi>0)
     internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale);
   for (; bi<n; bi+=blockSize)
-    internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
+    internal::stable_norm_kernel(this->segment(bi,numext::mini(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
   return scale * sqrt(ssq);
 }
 
diff --git a/Eigen/src/Core/functors/BinaryFunctors.h b/Eigen/src/Core/functors/BinaryFunctors.h
index 157d075a7..9c96181c7 100644
--- a/Eigen/src/Core/functors/BinaryFunctors.h
+++ b/Eigen/src/Core/functors/BinaryFunctors.h
@@ -115,7 +115,7 @@ struct functor_traits > {
   */
 template<typename Scalar> struct scalar_min_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op)
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { EIGEN_USING_STD_MATH(min); return (min)(a, b); }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return numext::mini(a, b); }
   template<typename Packet>
   EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
   { return internal::pmin(a,b); }
@@ -138,7 +138,7 @@ struct functor_traits > {
   */
 template<typename Scalar> struct scalar_max_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op)
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { EIGEN_USING_STD_MATH(max); return (max)(a, b); }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return numext::maxi(a, b); }
   template<typename Packet>
   EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
   { return internal::pmax(a,b); }
@@ -164,8 +164,6 @@ template<typename Scalar> struct scalar_hypot_op {
 //   typedef typename NumTraits<Scalar>::Real result_type;
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const
   {
-    EIGEN_USING_STD_MATH(max);
-    EIGEN_USING_STD_MATH(min);
     using std::sqrt;
     Scalar p, qp;
     if(_x>_y)
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index f9b908e22..c09854951 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -86,6 +86,11 @@
   #define EIGEN_ALIGN 0
 #endif
+
+// This macro can be used to prevent from macro expansion, e.g.:
+// std::max EIGNE_NOT_A_MACRO(a,b)
+#define EIGEN_NOT_A_MACRO
+
 
 // EIGEN_ALIGN_STATICALLY is the true test whether we want to align arrays on the stack or not. It takes into account both the user choice to explicitly disable
 // alignment (EIGEN_DONT_ALIGN_STATICALLY) and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT). Henceforth, only EIGEN_ALIGN_STATICALLY should be used.
 #if EIGEN_ARCH_WANTS_STACK_ALIGNMENT && !defined(EIGEN_DONT_ALIGN_STATICALLY)
diff --git a/Eigen/src/Eigenvalues/EigenSolver.h b/Eigen/src/Eigenvalues/EigenSolver.h
index 8a83b85bb..9372021ff 100644
--- a/Eigen/src/Eigenvalues/EigenSolver.h
+++ b/Eigen/src/Eigenvalues/EigenSolver.h
@@ -368,7 +368,6 @@ EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvect
 {
   using std::sqrt;
   using std::abs;
-  using std::max;
   using numext::isfinite;
   eigen_assert(matrix.cols() == matrix.rows());
 
@@ -409,7 +408,7 @@ EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvect
       {
         Scalar t0 = m_matT.coeff(i+1, i);
         Scalar t1 = m_matT.coeff(i, i+1);
-        Scalar maxval = (max)(abs(p),(max)(abs(t0),abs(t1)));
+        Scalar maxval = numext::maxi(abs(p),numext::maxi(abs(t0),abs(t1)));
         t0 /= maxval;
         t1 /= maxval;
         Scalar p0 = p/maxval;
@@ -600,8 +599,7 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
           }
 
           // Overflow control
-          EIGEN_USING_STD_MATH(max);
-          Scalar t = (max)(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n)));
+          Scalar t = numext::maxi(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n)));
           if ((eps * t) * t > Scalar(1))
             m_matT.block(i, n-1, size-i, 2) /= t;
 
diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index 1dd2ab45b..54f60b197 100644
--- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -732,7 +732,6 @@ struct direct_selfadjoint_eigenvalues<SolverType,2,false>
   EIGEN_DEVICE_FUNC
   static inline void run(SolverType& solver, const MatrixType& mat, int options)
   {
-    EIGEN_USING_STD_MATH(max)
     EIGEN_USING_STD_MATH(sqrt);
 
     eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
@@ -746,7 +745,7 @@ struct direct_selfadjoint_eigenvalues<SolverType,2,false>
 
     // map the matrix coefficients to [-1:1] to avoid over- and underflow.
     Scalar scale = mat.cwiseAbs().maxCoeff();
-    scale = (max)(scale,Scalar(1));
+    scale = numext::maxi(scale,Scalar(1));
     MatrixType scaledMat = mat / scale;
 
     // Compute the eigenvalues
diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h
index 216e5b12f..508eba767 100644
--- a/Eigen/src/Geometry/Quaternion.h
+++ b/Eigen/src/Geometry/Quaternion.h
@@ -571,7 +571,6 @@ template<class Derived>
 template<typename Derived1, typename Derived2>
 inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
 {
-  EIGEN_USING_STD_MATH(max);
   using std::sqrt;
   Vector3 v0 = a.normalized();
   Vector3 v1 = b.normalized();
@@ -587,7 +586,7 @@ inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Der
  if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())
  {
-    c = (max)(c,Scalar(-1));
+    c = numext::maxi(c,Scalar(-1));
    Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
    JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
    Vector3 axis = svd.matrixV().col(2);
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
index 7a8aa8d3f..0f7e5b8fe 100644
--- a/Eigen/src/SVD/JacobiSVD.h
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -723,8 +723,7 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
       // if this 2x2 sub-matrix is not diagonal already...
       // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
       // keep us iterating forever. Similarly, small denormal numbers are considered zero.
-      EIGEN_USING_STD_MATH(max);
-      RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
+      RealScalar threshold = numext::maxi(considerAsZero, precision * numext::maxi(abs(m_workMatrix.coeff(p,p)),
                                                                      abs(m_workMatrix.coeff(q,q))));
       // We compare both values to threshold instead of calling max to be robust to NaN (See bug 791)
       if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
diff --git a/Eigen/src/SparseCore/SparseFuzzy.h b/Eigen/src/SparseCore/SparseFuzzy.h
index a76c1a5e0..7d47eb94d 100644
--- a/Eigen/src/SparseCore/SparseFuzzy.h
+++ b/Eigen/src/SparseCore/SparseFuzzy.h
@@ -16,13 +16,12 @@ template<typename Derived>
 template<typename OtherDerived>
 bool SparseMatrixBase<Derived>::isApprox(const SparseMatrixBase<OtherDerived>& other, const RealScalar &prec) const
 {
-  using std::min;
   const typename internal::nested_eval<Derived,2,PlainObject>::type actualA(derived());
   typename internal::conditional<bool(IsRowMajor)==bool(OtherDerived::IsRowMajor),
     const typename internal::nested_eval<OtherDerived,2,PlainObject>::type,
     const PlainObject>::type actualB(other.derived());
 
-  return (actualA - actualB).squaredNorm() <= prec * prec * (min)(actualA.squaredNorm(), actualB.squaredNorm());
+  return (actualA - actualB).squaredNorm() <= prec * prec * numext::mini(actualA.squaredNorm(), actualB.squaredNorm());
 }
 
 } // end namespace Eigen
diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h
index 879ac553d..133211488 100644
--- a/Eigen/src/SparseQR/SparseQR.h
+++ b/Eigen/src/SparseQR/SparseQR.h
@@ -325,7 +325,6 @@ template <typename MatrixType, typename OrderingType>
 void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
 {
   using std::abs;
-  using std::max;
   eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step");
 
   Index m = mat.rows();
@@ -377,7 +376,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
   if(m_useDefaultThreshold)
   {
     RealScalar max2Norm = 0.0;
-    for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm());
+    for (int j = 0; j < n; j++) max2Norm = numext::maxi(max2Norm, m_pmat.col(j).norm());
     if(max2Norm==RealScalar(0)) max2Norm = RealScalar(1);
     pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();
   }
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
index 590797973..8336c2644 100644
--- a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
@@ -593,7 +593,6 @@ inline const AutoDiffScalar<Matrix<typename internal::traits<DerTypeA>::Scalar,D
 atan2(const AutoDiffScalar<DerTypeA>& a, const AutoDiffScalar<DerTypeB>& b)
 {
   using std::atan2;
-  using std::max;
   typedef typename internal::traits<DerTypeA>::Scalar Scalar;
   typedef AutoDiffScalar<Matrix<Scalar,Dynamic,1> > PlainADS;
   PlainADS ret;
diff --git a/unsupported/Eigen/src/BDCSVD/BDCSVD.h b/unsupported/Eigen/src/BDCSVD/BDCSVD.h
index e2551cf88..ac71b0aa8 100644
--- a/unsupported/Eigen/src/BDCSVD/BDCSVD.h
+++ b/unsupported/Eigen/src/BDCSVD/BDCSVD.h
@@ -649,7 +649,6 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayXr& col0, const ArrayXr& dia
 {
   using std::abs;
   using std::swap;
-  using std::max;
 
   Index n = col0.size();
   Index actual_n = n;
@@ -728,7 +727,7 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayXr& col0, const ArrayXr& dia
       // rational interpolation: fit a function of the form a / mu + b through the two previous
       // iterates and use its zero to compute the next iterate
       bool useBisection = fPrev*fCur>0;
-      while (fCur!=0 && abs(muCur - muPrev) > 8 * NumTraits<RealScalar>::epsilon() * (max)(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)
+      while (fCur!=0 && abs(muCur - muPrev) > 8 * NumTraits<RealScalar>::epsilon() * numext::maxi(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)
       {
         ++m_numIters;
 
@@ -779,7 +778,7 @@ void BDCSVD<MatrixType>::computeSingVals(const ArrayXr& col0, const ArrayXr& dia
 #endif
       eigen_internal_assert(fLeft * fRight < 0);
 
-      while (rightShifted - leftShifted > 2 * NumTraits<RealScalar>::epsilon() * (max)(abs(leftShifted), abs(rightShifted)))
+      while (rightShifted - leftShifted > 2 * NumTraits<RealScalar>::epsilon() * numext::maxi(abs(leftShifted), abs(rightShifted)))
       {
         RealScalar midShifted = (leftShifted + rightShifted) / 2;
         RealScalar fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
@@ -981,7 +980,6 @@ void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index
 {
   using std::sqrt;
   using std::abs;
-  using std::max;
 
   const Index length = lastCol + 1 - firstCol;
   Block<MatrixXr,Dynamic,1> col0(m_computed, firstCol+shift, firstCol+shift, length, 1);
@@ -990,7 +988,7 @@ void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index
   RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff();
   RealScalar epsilon_strict = NumTraits<RealScalar>::epsilon() * maxDiag;
-  RealScalar epsilon_coarse = 8 * NumTraits<RealScalar>::epsilon() * (max)(col0.cwiseAbs().maxCoeff(), maxDiag);
+  RealScalar epsilon_coarse = 8 * NumTraits<RealScalar>::epsilon() * numext::maxi(col0.cwiseAbs().maxCoeff(), maxDiag);
 
 #ifdef EIGEN_BDCSVD_SANITY_CHECKS
   assert(m_naiveU.allFinite());
diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
index dd43de6b3..35cfa315d 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
@@ -126,7 +126,6 @@ template<typename _MatrixType>
 void IncompleteCholesky<Scalar,_UpLo,OrderingType>::factorize(const _MatrixType& mat)
 {
   using std::sqrt;
-  using std::min;
   eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
 
   // Dropping strategies : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. Other strategies will be added
@@ -160,7 +159,7 @@ void IncompleteCholesky<Scalar,_UpLo,OrderingType>::factorize(const _MatrixType
   for (int j = 0; j < n; j++){
     for (int k = colPtr[j]; k < colPtr[j+1]; k++)
       vals[k] /= (m_scal(j) * m_scal(rowIdx[k]));
-    mindiag = (min)(vals[colPtr[j]], mindiag);
+    mindiag = numext::mini(vals[colPtr[j]], mindiag);
   }
   if(mindiag < Scalar(0.)) m_shift = m_shift - mindiag;
--
cgit v1.2.3
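Editorial sketch for the last patch (bug #701), not part of the commit series: Eigen originally wrote (min)(a,b) so that a function-style min/max macro (e.g. from <windows.h>) could not expand, but the parenthesized name also suppresses argument-dependent lookup. The self-contained example below, with hypothetical names outside the numext namespace, shows the idiom the commit introduces: an empty macro placed between the function name and the argument list equally blocks macro expansion, while the plain call syntax keeps std::min/std::max (and ADL overloads) reachable.

#include <algorithm>
#include <iostream>

// Expands to nothing. A function-style macro is only invoked when its name is
// immediately followed by '(', so "min NOT_A_MACRO (x, y)" keeps a macro named
// 'min' (e.g. from <windows.h>) from being expanded.
#define NOT_A_MACRO

template <typename T>
inline T mini(const T& x, const T& y)
{
  using std::min;                 // plain name: normal lookup and ADL still apply
  return min NOT_A_MACRO (x, y);  // macro-proof call; unlike (min)(x, y) it does not block ADL
}

template <typename T>
inline T maxi(const T& x, const T& y)
{
  using std::max;
  return max NOT_A_MACRO (x, y);
}

int main()
{
  std::cout << mini(3, 7) << ' ' << maxi(2.5, 1.5) << '\n';  // prints "3 2.5"
  return 0;
}

Callers then write numext::mini(a,b) / numext::maxi(a,b) instead of (min)(a,b) / (max)(a,b), which is exactly the mechanical substitution the patch performs across the files listed above.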
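Similarly, for the first patch in the series (bug #881), a minimal usage sketch of the behaviour it changes; the matrix sizes and values are arbitrary and the snippet is not taken from Eigen's test suite. After the patch, SparseMatrixBase::isApprox evaluates (A - B).squaredNorm() on the sparse operands themselves rather than converting both sides to dense matrices first.

#include <Eigen/SparseCore>
#include <iostream>
#include <vector>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  typedef Eigen::Triplet<double> T;

  std::vector<T> entries;
  entries.push_back(T(0, 0, 1.0));
  entries.push_back(T(3, 2, 2.0));
  entries.push_back(T(7, 9, 3.0));

  SpMat A(10, 10), B(10, 10);
  A.setFromTriplets(entries.begin(), entries.end());
  B = A;
  B.coeffRef(3, 2) += 1e-13;  // tiny perturbation of one stored coefficient

  // Fuzzy comparison; with the patch this stays entirely in the sparse world.
  std::cout << std::boolalpha << A.isApprox(B) << '\n';  // expected: true
  return 0;
}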